f0 change regardless of interlocutor (per speaker; using categorical values)
f0 change regardless of interlocutor (all speakers together; using continuous values)
f0 entrainment
# Per-speaker f0 drift: regress each z-scored f0 feature on overall turn
# number, separately for the whole experiment and for each task section.
# Produces `ind` with one row per (speaker, feature, section) holding the
# t statistic and p value of the turnOverall slope.
f0_features <- c(f0median = "f0medz", f0max = "f0maxz", f0SD = "f0sdz")
ind_rows <- list()
for (s in unique(ipus$speaker)) {
  cond <- unique(ipus$condition[ipus$speaker == s])
  for (feat in names(f0_features)) {
    col <- f0_features[[feat]]
    for (sec in c("entireExp", "Lists", "Diapix")) {
      d <- if (sec == "entireExp") {
        ipus |> filter(speaker == s)
      } else {
        ipus |> filter(speaker == s, task == sec)
      }
      # Skip sections with no usable data (files are missing for one dyad's
      # Diapix recordings; the guard is applied uniformly to all sections).
      if (all(is.na(d[[col]]))) next
      fit <- tidy(lm(reformulate("turnOverall", response = col), d))
      ind_rows[[length(ind_rows) + 1]] <- data.frame(
        speaker = s, condition = cond, feature = feat, section = sec,
        t = fit$statistic[2], p = fit$p.value[2]
      )
    }
  }
}
# Bind once instead of growing the data frame row by row. t and p stay
# numeric here (the row-wise c() assignment coerced them to character, which
# had to be undone downstream; as.numeric on them remains a no-op).
ind <- do.call(rbind, ind_rows)
# Classify each per-speaker slope as a significant increase/decrease
# (p < .05) or "ns", and attach the speakers' questionnaire and personality
# scores (one row per speaker).
dat <- merge(
  ind |>
    mutate(across(c(t, p), as.numeric)) |>
    mutate(change = case_when(
      p < 0.05 & t < 0 ~ "decrease",
      p < 0.05 & t > 0 ~ "increase",
      # Catch-all: the original branch `p > 0.05` silently produced NA for
      # p exactly equal to 0.05 (or t == 0).
      TRUE ~ "ns"
    )),
  ipus |>
    select(speaker, gender, comfortPre, comfortPost, closeness, similarity,
           likeability, becomeFriends, extraversion, openness, agreeableness,
           conscientiousness, neuroticism) |>
    filter(!duplicated(speaker)),
  by = "speaker"
) |>
  mutate(across(c(speaker, condition, feature, section, change, gender),
                as.factor))
# "ns" as reference level so multinomial coefficients contrast the two
# significant directions against no change.
dat$change <- relevel(dat$change, ref = "ns")
# Relabel the temperature and f0 outcome codings so the two remain
# distinguishable once joined into a single table below.
temp_labels <- c(ns = "tempNS", increase = "tempIncrease",
                 decrease = "tempDecrease")
f0_labels <- c(ns = "f0NS", increase = "f0Increase",
               decrease = "f0Decrease")
indTemp <- indTemp |>
  mutate(effect = unname(temp_labels[as.character(effect)]))
s <- dat |>
  mutate(change = unname(f0_labels[as.character(change)]))
# Reshape the per-ROI temperature effects to one wide row per speaker and
# section: the ROI columns (Forehead/Eyes/Nose/Cheeks) hold the effect
# labels. The same reshape applied to each section; the previous per-section
# copies also carried a redundant group_by(speaker) that pivot_wider's
# id_cols already covers.
widen_temp <- function(sec) {
  indTemp |>
    filter(section == sec) |>
    pivot_wider(names_from = ROI, values_from = effect, id_cols = speaker) |>
    mutate(section = sec)
}
# NOTE(review): two variants were tried and brought no new insights —
# relabelling the Diapix rows as "Lists" (to pair temperature change, which
# takes a while to happen, with f0 adaptation during "Lists"), and dropping
# the Lists section entirely.
t <- bind_rows(lapply(c("entireExp", "Lists", "Diapix"), widen_temp))
# Join the wide temperature table with the f0 change table (long by
# feature); all = TRUE keeps speakers that are missing one modality.
ts <- merge(t,
            s |> select(-condition),
            by = c("speaker", "section"), all = TRUE) |>
  mutate(across(c(speaker, section, Forehead, Eyes, Nose, Cheeks,
                  feature, change, gender), as.factor))
# Non-significant outcomes as reference level for every categorical
# predictor and for the outcome itself.
for (roi in c("Forehead", "Cheeks", "Eyes", "Nose")) {
  ts[[roi]] <- relevel(ts[[roi]], ref = "tempNS")
}
ts$change <- relevel(ts$change, ref = "f0NS")
No effect of condition
# Stacked counts of f0 median change by condition (Lists section).
ggplot(data = dat |> filter(feature == "f0median", section == "Lists"),
       aes(x = condition, fill = change)) +
  geom_bar() +
  scale_fill_manual(values = c("decrease" = "lightblue",
                               "increase" = "red",
                               "ns" = "gray")) +
  labs(title = "f0 median (first part of exp - Lists)")
# Multinomial logit: condition as predictor of f0 median change
# (ns / increase / decrease).
m <- nnet::multinom(change ~ condition,
                    data = dat |> filter(feature == "f0median", section == "Lists"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))  # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 7.962800
## iter 20 value 7.835155
## iter 30 value 7.832797
## iter 40 value 7.832516
## iter 40 value 7.832516
## iter 40 value 7.832516
## final value 7.832516
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.00000559 97.0 -0.125 0.901
## 2 decrease conditionimpersonal 1.00 97.0 0.0954 0.924
## 3 increase (Intercept) 0.0500 1.03 -2.87 0.00411
## 4 increase conditionimpersonal 0.0000450 158. -0.0635 0.949
# Estimated marginal probabilities and pairwise condition contrasts, per change level
emmeans(m, pairwise~condition, by="change")
## $emmeans
## change = ns:
## condition prob SE df lower.CL upper.CL
## close 9.50e-01 0.048735 4 0.814687 1.085308
## impersonal 9.44e-01 0.053990 4 0.794545 1.094345
##
## change = decrease:
## condition prob SE df lower.CL upper.CL
## close 5.30e-06 0.000515 4 -0.001425 0.001436
## impersonal 5.56e-02 0.053989 4 -0.094345 0.205450
##
## change = increase:
## condition prob SE df lower.CL upper.CL
## close 5.00e-02 0.048733 4 -0.085306 0.185301
## impersonal 2.20e-06 0.000352 4 -0.000976 0.000981
##
## Confidence level used: 0.95
##
## $contrasts
## change = ns:
## contrast estimate SE df t.ratio p.value
## close - impersonal 0.00555 0.0727 4 0.076 0.9428
##
## change = decrease:
## contrast estimate SE df t.ratio p.value
## close - impersonal -0.05555 0.0540 4 -1.029 0.3617
##
## change = increase:
## contrast estimate SE df t.ratio p.value
## close - impersonal 0.05000 0.0487 4 1.026 0.3629
No effect of perception of partner
# Multinomial logit: closeness rating as predictor of f0 median change.
m <- nnet::multinom(change ~ closeness,
                    data = dat |> filter(feature == "f0median", section == "Lists"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))  # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 8.110318
## iter 20 value 7.931639
## iter 30 value 7.931302
## final value 7.931302
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.168 2.07 -0.773 0.439
## 2 decrease closeness 0.325 0.880 -0.832 0.405
## 3 increase (Intercept) 0.000858 3.95 -1.79 0.0742
## 4 increase closeness 0.671 0.647 1.10 0.271
# Multinomial logit: perceived similarity as predictor of f0 median change.
m <- nnet::multinom(change ~ similarity,
                    data = dat |> filter(feature == "f0median", section == "Lists"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))  # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 9.028983
## iter 20 value 8.448229
## iter 30 value 8.441630
## final value 8.441627
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.321 3.03 -0.247 0.805
## 2 decrease similarity 0.356 0.704 -0.845 0.398
## 3 increase (Intercept) 0.000961 5.20 -1.34 0.181
## 4 increase similarity 0.629 0.732 0.721 0.471
# Multinomial logit: likeability rating as predictor of f0 median change.
m <- nnet::multinom(change ~ likeability,
                    data = dat |> filter(feature == "f0median", section == "Lists"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))  # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 8.964779
## iter 20 value 8.844134
## final value 8.843673
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.0155 5.15 -0.807 0.420
## 2 decrease likeability 0.521 0.721 0.114 0.909
## 3 increase (Intercept) 0.000111 7.91 -1.15 0.249
## 4 increase likeability 0.677 0.984 0.751 0.453
# Multinomial logit: "become friends" rating as predictor of f0 median change.
m <- nnet::multinom(change ~ becomeFriends,
                    data = dat |> filter(feature == "f0median", section == "Lists"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))  # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 8.996318
## final value 8.770797
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.00132 5.37 -1.24 0.217
## 2 decrease becomeFriends 0.615 0.756 0.621 0.534
## 3 increase (Intercept) 0.00132 5.37 -1.24 0.217
## 4 increase becomeFriends 0.615 0.756 0.621 0.534
No effect of BFI scores
# Multinomial logit: extraversion (BFI) as predictor of f0 median change.
m <- nnet::multinom(change ~ extraversion,
                    data = dat |> filter(feature == "f0median", section == "Lists"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))  # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 7.850898
## iter 20 value 7.354077
## iter 30 value 7.347693
## iter 40 value 7.347368
## final value 7.347367
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 1.00 13.7 0.921 0.357
## 2 decrease extraversion 0.00380 5.18 -1.08 0.282
## 3 increase (Intercept) 0.904 6.97 0.321 0.748
## 4 increase extraversion 0.141 2.29 -0.790 0.430
# Multinomial logit: openness (BFI) as predictor of f0 median change.
m <- nnet::multinom(change ~ openness,
                    data = dat |> filter(feature == "f0median", section == "Lists"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))  # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 9.012340
## final value 9.012328
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.165 4.18 -0.389 0.697
## 2 decrease openness 0.358 1.28 -0.458 0.647
## 3 increase (Intercept) 0.165 4.18 -0.389 0.697
## 4 increase openness 0.358 1.28 -0.458 0.647
# Multinomial logit: agreeableness (BFI) as predictor of f0 median change.
# NOTE(review): the captured output below shows this fit stopped at the
# 100-iteration cap without converging — interpret with caution.
m <- nnet::multinom(change ~ agreeableness,
                    data = dat |> filter(feature == "f0median", section == "Lists"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))  # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 7.676414
## iter 20 value 6.349851
## iter 30 value 6.148420
## iter 40 value 6.087807
## iter 50 value 6.035148
## iter 60 value 5.984933
## iter 70 value 5.954964
## iter 80 value 5.931183
## iter 90 value 5.915486
## iter 100 value 5.896061
## final value 5.896061
## stopped after 100 iterations
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 1 45.4 0.942 0.346
## 2 decrease agreeableness 0.0000000557 17.4 -0.959 0.337
## 3 increase (Intercept) 0.816 8.04 0.185 0.853
## 4 increase agreeableness 0.174 2.52 -0.617 0.537
# Multinomial logit: conscientiousness (BFI) as predictor of f0 median change.
m <- nnet::multinom(change ~ conscientiousness,
                    data = dat |> filter(feature == "f0median", section == "Lists"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))  # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 9.453357
## iter 20 value 8.842892
## iter 30 value 8.826723
## final value 8.826014
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.00000425 12.5 -0.992 0.321
## 2 decrease conscientiousness 0.887 2.80 0.736 0.461
## 3 increase (Intercept) 0.0999 6.59 -0.334 0.739
## 4 increase conscientiousness 0.413 1.67 -0.210 0.834
# Multinomial logit: neuroticism (BFI) as predictor of f0 median change.
m <- nnet::multinom(change ~ neuroticism,
                    data = dat |> filter(feature == "f0median", section == "Lists"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))  # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 6.437604
## iter 20 value 6.132349
## iter 30 value 6.130790
## final value 6.130780
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.00000197 9.02 -1.46 0.145
## 2 decrease neuroticism 0.930 2.19 1.18 0.239
## 3 increase (Intercept) 0.999 7.24 1.00 0.317
## 4 increase neuroticism 0.00984 3.70 -1.25 0.212
No effect of forehead temperature change on f0 median change
# Stacked counts of f0 median change by forehead temperature change (Lists).
ggplot(data = ts |> filter(section == "Lists", feature == "f0median",
                           !is.na(Forehead)),
       aes(x = Forehead, fill = change)) +
  geom_bar() +
  scale_fill_manual(values = c("f0Decrease" = "lightblue",
                               "f0Increase" = "red",
                               "f0NS" = "gray")) +
  labs(title = "Forehead temp. change and f0 median change")
# Multinomial logit: forehead temperature change as predictor of f0 median change.
m <- nnet::multinom(change ~ Forehead,
                    data = ts |> filter(feature == "f0median", section == "Lists",
                                        !is.na(Forehead)))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))  # log odds -> probabilities
## # weights: 12 (6 variable)
## initial value 37.352818
## iter 10 value 7.935794
## iter 20 value 7.888776
## final value 7.887962
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 f0Decrease (Intercept) 0.000000291 514. -0.0293 0.977
## 2 f0Decrease ForeheadtempDecrease 0.223 3497. -0.000357 1.00
## 3 f0Decrease ForeheadtempIncrease 1.00 514. 0.0237 0.981
## 4 f0Increase (Intercept) 0.000000291 514. -0.0293 0.977
## 5 f0Increase ForeheadtempDecrease 0.223 3497. -0.000357 1.00
## 6 f0Increase ForeheadtempIncrease 1.00 514. 0.0237 0.981
# Estimated marginal probabilities and pairwise forehead contrasts, per change level
emmeans(m, pairwise~Forehead, by="change")
## $emmeans
## change = f0NS:
## Forehead prob SE df lower.CL upper.CL
## tempNS 1e+00 0.00021170 6 0.9994814 1.0005174
## tempDecrease 1e+00 0.00040886 6 0.9989994 1.0010003
## tempIncrease 9e-01 0.06708199 6 0.7358565 1.0641439
##
## change = f0Decrease:
## Forehead prob SE df lower.CL upper.CL
## tempNS 3e-07 0.00014970 6 -0.0003660 0.0003666
## tempDecrease 1e-07 0.00028910 6 -0.0007073 0.0007075
## tempIncrease 5e-02 0.04873393 6 -0.0692477 0.1692476
##
## change = f0Increase:
## Forehead prob SE df lower.CL upper.CL
## tempNS 3e-07 0.00014970 6 -0.0003660 0.0003666
## tempDecrease 1e-07 0.00028910 6 -0.0007073 0.0007075
## tempIncrease 5e-02 0.04873393 6 -0.0692477 0.1692475
##
## Confidence level used: 0.95
##
## $contrasts
## change = f0NS:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease -4.2e-07 0.000460 6 -0.001 1.0000
## tempNS - tempIncrease 1.0e-01 0.067082 6 1.491 0.3593
## tempDecrease - tempIncrease 1.0e-01 0.067083 6 1.491 0.3593
##
## change = f0Decrease:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 2.1e-07 0.000326 6 0.001 1.0000
## tempNS - tempIncrease -5.0e-02 0.048734 6 -1.026 0.5890
## tempDecrease - tempIncrease -5.0e-02 0.048735 6 -1.026 0.5890
##
## change = f0Increase:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 2.1e-07 0.000326 6 0.001 1.0000
## tempNS - tempIncrease -5.0e-02 0.048734 6 -1.026 0.5890
## tempDecrease - tempIncrease -5.0e-02 0.048735 6 -1.026 0.5890
##
## P value adjustment: tukey method for comparing a family of 3 estimates
No effect of nose temperature change on f0 median change
# Stacked counts of f0 median change by nose temperature change (Lists).
ggplot(data = ts |> filter(section == "Lists", feature == "f0median",
                           !is.na(Nose)),
       aes(x = Nose, fill = change)) +
  geom_bar() +
  scale_fill_manual(values = c("f0Decrease" = "lightblue",
                               "f0Increase" = "red",
                               "f0NS" = "gray")) +
  labs(title = "Nose temp. change and f0 median change")
# Multinomial logit: nose temperature change as predictor of f0 median change.
m <- nnet::multinom(change ~ Nose,
                    data = ts |> filter(feature == "f0median", section == "Lists",
                                        !is.na(Nose)))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))  # log odds -> probabilities
## # weights: 12 (6 variable)
## initial value 37.352818
## iter 10 value 7.483791
## iter 20 value 7.275418
## iter 30 value 7.268836
## iter 30 value 7.268836
## iter 30 value 7.268836
## final value 7.268836
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 f0Decrease (Intercept) 0.000000827 348. -0.0403 0.968
## 2 f0Decrease NosetempDecrease 0.0621 2166. -0.00125 0.999
## 3 f0Decrease NosetempIncrease 1.00 348. 0.0320 0.975
## 4 f0Increase (Intercept) 0.0909 1.05 -2.20 0.0281
## 5 f0Increase NosetempDecrease 0.0000145 415. -0.0268 0.979
## 6 f0Increase NosetempIncrease 0.0000976 75.5 -0.122 0.903
# Estimated marginal probabilities and pairwise nose contrasts, per change level
emmeans(m, pairwise~Nose, by="change")
## $emmeans
## change = f0NS:
## Nose prob SE df lower.CL upper.CL
## tempNS 0.9091090 0.0866707 6 0.6970335 1.1211845
## tempDecrease 0.9999985 0.0006134 6 0.9984977 1.0014993
## tempIncrease 0.9473564 0.0512333 6 0.8219930 1.0727199
##
## change = f0Decrease:
## Nose prob SE df lower.CL upper.CL
## tempNS 0.0000008 0.0002614 6 -0.0006389 0.0006404
## tempDecrease 0.0000001 0.0001169 6 -0.0002861 0.0002862
## tempIncrease 0.0526343 0.0512291 6 -0.0727187 0.1779874
##
## change = f0Increase:
## Nose prob SE df lower.CL upper.CL
## tempNS 0.0908903 0.0866703 6 -0.1211844 0.3029650
## tempDecrease 0.0000015 0.0006021 6 -0.0014719 0.0014748
## tempIncrease 0.0000092 0.0006974 6 -0.0016974 0.0017158
##
## Confidence level used: 0.95
##
## $contrasts
## change = f0NS:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease -9.09e-02 0.086673 6 -1.049 0.5764
## tempNS - tempIncrease -3.82e-02 0.100681 6 -0.380 0.9245
## tempDecrease - tempIncrease 5.26e-02 0.051237 6 1.027 0.5882
##
## change = f0Decrease:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 7.00e-07 0.000286 6 0.002 1.0000
## tempNS - tempIncrease -5.26e-02 0.051230 6 -1.027 0.5882
## tempDecrease - tempIncrease -5.26e-02 0.051229 6 -1.027 0.5882
##
## change = f0Increase:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 9.09e-02 0.086672 6 1.049 0.5764
## tempNS - tempIncrease 9.09e-02 0.086673 6 1.049 0.5765
## tempDecrease - tempIncrease -7.79e-06 0.000921 6 -0.008 1.0000
##
## P value adjustment: tukey method for comparing a family of 3 estimates
No effect of eye temperature change on f0 median change
# Stacked counts of f0 median change by eye temperature change (Lists).
ggplot(data = ts |> filter(section == "Lists", feature == "f0median",
                           !is.na(Eyes)),
       aes(x = Eyes, fill = change)) +
  geom_bar() +
  scale_fill_manual(values = c("f0Decrease" = "lightblue",
                               "f0Increase" = "red",
                               "f0NS" = "gray")) +
  labs(title = "Eye temp. change and f0 median change")
# Multinomial logit: eye temperature change as predictor of f0 median change.
m <- nnet::multinom(change ~ Eyes,
                    data = ts |> filter(feature == "f0median", section == "Lists",
                                        !is.na(Eyes)))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))  # log odds -> probabilities
## # weights: 12 (6 variable)
## initial value 31.859756
## iter 10 value 7.321812
## iter 20 value 7.276667
## final value 7.276414
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 f0Decrease (Intercept) 0.0000000929 900. -0.0180 0.986
## 2 f0Decrease EyestempDecrease 0.210 37.5 -0.0353 0.972
## 3 f0Decrease EyestempIncrease 1.00 900. 0.0151 0.988
## 4 f0Increase (Intercept) 0.0000000929 900. -0.0180 0.986
## 5 f0Increase EyestempDecrease 0.210 37.5 -0.0353 0.972
## 6 f0Increase EyestempIncrease 1.00 900. 0.0151 0.988
# Estimated marginal probabilities and pairwise eye contrasts, per change level
emmeans(m, pairwise~Eyes, by="change") # isn't working (I'm thinking maybe because "eyes" have lots of missing values)
## $emmeans
## change = f0NS:
## Eyes prob SE df lower.CL upper.CL
## tempNS 0.9999998 0.00011824 6 0.9997105 1.0002891
## tempDecrease 1.0000000 0.00003271 6 0.9999199 1.0000800
## tempIncrease 0.8666546 0.08777411 6 0.6518791 1.0814301
##
## change = f0Decrease:
## Eyes prob SE df lower.CL upper.CL
## tempNS 0.0000001 0.00008361 6 -0.0002045 0.0002047
## tempDecrease 0.0000000 0.00002313 6 -0.0000566 0.0000566
## tempIncrease 0.0666727 0.06440883 6 -0.0909300 0.2242754
##
## change = f0Increase:
## Eyes prob SE df lower.CL upper.CL
## tempNS 0.0000001 0.00008361 6 -0.0002045 0.0002047
## tempDecrease 0.0000000 0.00002313 6 -0.0000566 0.0000566
## tempIncrease 0.0666727 0.06440882 6 -0.0909300 0.2242754
##
## Confidence level used: 0.95
##
## $contrasts
## change = f0NS:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease -1.40e-07 8.55e-05 6 -0.002 1.0000
## tempNS - tempIncrease 1.33e-01 8.78e-02 6 1.519 0.3476
## tempDecrease - tempIncrease 1.33e-01 8.78e-02 6 1.519 0.3476
##
## change = f0Decrease:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 7.00e-08 6.05e-05 6 0.001 1.0000
## tempNS - tempIncrease -6.67e-02 6.44e-02 6 -1.035 0.5839
## tempDecrease - tempIncrease -6.67e-02 6.44e-02 6 -1.035 0.5839
##
## change = f0Increase:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 7.00e-08 6.05e-05 6 0.001 1.0000
## tempNS - tempIncrease -6.67e-02 6.44e-02 6 -1.035 0.5839
## tempDecrease - tempIncrease -6.67e-02 6.44e-02 6 -1.035 0.5839
##
## P value adjustment: tukey method for comparing a family of 3 estimates
No effect of cheek temperature change on f0 median change
# Stacked counts of f0 median change by cheek temperature change (Lists).
# Fixed: a stray extra comma (empty argument) in the filter() call.
ggplot(ts |> filter(section=="Lists", feature=="f0median", !is.na(Cheeks)), aes(Cheeks, fill=change))+
geom_bar()+
scale_fill_manual(values = c("f0Decrease" = "lightblue", "f0Increase" = "red", "f0NS" = "gray"))+
ggtitle("Cheek temp. change and f0 median change")
# Multinomial logit: cheek temperature change as predictor of f0 median change.
m <- nnet::multinom(change ~ Cheeks,
                    data = ts |> filter(feature == "f0median", section == "Lists",
                                        !is.na(Cheeks)))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))  # log odds -> probabilities
## # weights: 12 (6 variable)
## initial value 37.352818
## iter 10 value 7.282132
## iter 20 value 7.157311
## final value 7.127866
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 f0Decrease (Intercept) 0.00000415 105. -0.118 0.906
## 2 f0Decrease CheekstempDecrease 0.166 644. -0.00251 0.998
## 3 f0Decrease CheekstempIncrease 1.00 105. 0.0998 0.920
## 4 f0Increase (Intercept) 0.0435 1.02 -3.02 0.00250
## 5 f0Increase CheekstempDecrease 0.000302 156. -0.0520 0.959
## 6 f0Increase CheekstempIncrease 0.000429 85.6 -0.0906 0.928
# Estimated marginal probabilities and pairwise cheek contrasts, per change level
emmeans(m, pairwise~Cheeks, by="change")
## $emmeans
## change = f0NS:
## Cheeks prob SE df lower.CL upper.CL
## tempNS 0.9565257 0.0425207 6 0.852481 1.060570
## tempDecrease 0.9999854 0.0022024 6 0.994596 1.005375
## tempIncrease 0.8749864 0.1169322 6 0.588864 1.161109
##
## change = f0Decrease:
## Cheeks prob SE df lower.CL upper.CL
## tempNS 0.0000040 0.0004155 6 -0.001013 0.001021
## tempDecrease 0.0000008 0.0005244 6 -0.001282 0.001284
## tempIncrease 0.1249965 0.1169254 6 -0.161110 0.411103
##
## change = f0Increase:
## Cheeks prob SE df lower.CL upper.CL
## tempNS 0.0434703 0.0425189 6 -0.060570 0.147510
## tempDecrease 0.0000137 0.0021391 6 -0.005220 0.005248
## tempIncrease 0.0000171 0.0014607 6 -0.003557 0.003591
##
## Confidence level used: 0.95
##
## $contrasts
## change = f0NS:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease -4.35e-02 0.042578 6 -1.021 0.5919
## tempNS - tempIncrease 8.15e-02 0.124423 6 0.655 0.7964
## tempDecrease - tempIncrease 1.25e-01 0.116953 6 1.069 0.5653
##
## change = f0Decrease:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 3.15e-06 0.000669 6 0.005 1.0000
## tempNS - tempIncrease -1.25e-01 0.116926 6 -1.069 0.5652
## tempDecrease - tempIncrease -1.25e-01 0.116927 6 -1.069 0.5652
##
## change = f0Increase:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 4.35e-02 0.042573 6 1.021 0.5919
## tempNS - tempIncrease 4.35e-02 0.042544 6 1.021 0.5915
## tempDecrease - tempIncrease -3.34e-06 0.002590 6 -0.001 1.0000
##
## P value adjustment: tukey method for comparing a family of 3 estimates
No effect of condition
# Stacked counts of f0 max change by condition (Lists section).
ggplot(data = dat |> filter(feature == "f0max", section == "Lists"),
       aes(x = condition, fill = change)) +
  geom_bar() +
  scale_fill_manual(values = c("decrease" = "lightblue",
                               "increase" = "red",
                               "ns" = "gray")) +
  labs(title = "f0 max (first part of exp - Lists)")
# Multinomial logit: condition as predictor of f0 max change
# (ns / increase / decrease).
m <- nnet::multinom(change ~ condition,
                    data = dat |> filter(feature == "f0max", section == "Lists"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))  # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 17.987773
## iter 20 value 17.963267
## iter 30 value 17.959908
## iter 40 value 17.959373
## iter 40 value 17.959372
## iter 40 value 17.959372
## final value 17.959372
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.0000168 57.5 -0.191 0.848
## 2 decrease conditionimpersonal 1.00 57.5 0.179 0.858
## 3 increase (Intercept) 0.100 0.745 -2.95 0.00320
## 4 increase conditionimpersonal 0.000117 80.1 -0.113 0.910
# Estimated marginal probabilities and pairwise condition contrasts, per change level
emmeans(m, pairwise~condition, by="change")
## $emmeans
## change = ns:
## condition prob SE df lower.CL upper.CL
## close 9.00e-01 0.067085 4 0.71374 1.08625
## impersonal 6.67e-01 0.111110 4 0.35818 0.97517
##
## change = decrease:
## condition prob SE df lower.CL upper.CL
## close 1.51e-05 0.000869 4 -0.00240 0.00243
## impersonal 3.33e-01 0.111110 4 0.02483 0.64181
##
## change = increase:
## condition prob SE df lower.CL upper.CL
## close 1.00e-01 0.067080 4 -0.08625 0.28624
## impersonal 8.70e-06 0.000694 4 -0.00192 0.00193
##
## Confidence level used: 0.95
##
## $contrasts
## change = ns:
## contrast estimate SE df t.ratio p.value
## close - impersonal 0.233 0.1298 4 1.798 0.1466
##
## change = decrease:
## contrast estimate SE df t.ratio p.value
## close - impersonal -0.333 0.1111 4 -3.000 0.0400
##
## change = increase:
## contrast estimate SE df t.ratio p.value
## close - impersonal 0.100 0.0671 4 1.490 0.2104
f0 max was less likely to decrease when the speaker felt closer to their partner (the outcome is categorical: decrease vs. ns vs. increase).
# Closeness ratings by direction of f0 max change.
ggplot(data = dat |> filter(feature == "f0max", section == "Lists"),
       aes(x = change, y = closeness)) +
  geom_boxplot()
# Multinomial logit: closeness rating as predictor of f0 max change.
m <- nnet::multinom(change ~ closeness,
                    data = dat |> filter(feature == "f0max", section == "Lists"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))  # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 19.290986
## final value 19.290383
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.813 1.18 1.24 0.214
## 2 decrease closeness 0.244 0.498 -2.27 0.0229
## 3 increase (Intercept) 0.0651 1.81 -1.47 0.141
## 4 increase closeness 0.497 0.414 -0.0259 0.979
f0 max was also less likely to decrease (a smaller effect) when the speaker liked their partner more.
# Likeability ratings by direction of f0 max change.
ggplot(data = dat |> filter(feature == "f0max", section == "Lists"),
       aes(x = change, y = likeability)) +
  geom_boxplot()
# Multinomial logit: likeability rating as predictor of f0 max change.
m <- nnet::multinom(change ~ likeability,
                    data = dat |> filter(feature == "f0max", section == "Lists"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))  # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 21.537138
## final value 21.412029
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.955 2.23 1.37 0.171
## 2 decrease likeability 0.325 0.366 -2.00 0.0452
## 3 increase (Intercept) 0.00989 4.51 -1.02 0.307
## 4 increase likeability 0.565 0.595 0.438 0.662
# Multinomial logit: perceived similarity as predictor of f0 max change.
m <- nnet::multinom(change ~ similarity,
                    data = dat |> filter(feature == "f0max", section == "Lists"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))  # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 23.588881
## final value 23.588845
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.324 1.42 -0.519 0.604
## 2 decrease similarity 0.460 0.256 -0.629 0.529
## 3 increase (Intercept) 0.0111 3.05 -1.47 0.141
## 4 increase similarity 0.572 0.458 0.637 0.524
# Multinomial logit: "become friends" rating as predictor of f0 max change.
m <- nnet::multinom(change ~ becomeFriends,
                    data = dat |> filter(feature == "f0max", section == "Lists"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))  # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 23.867638
## final value 23.867377
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.260 1.51 -0.691 0.490
## 2 decrease becomeFriends 0.476 0.252 -0.386 0.700
## 3 increase (Intercept) 0.0189 3.13 -1.26 0.207
## 4 increase becomeFriends 0.550 0.471 0.423 0.672
No effect of BFI scores
# Multinomial logit: extraversion (BFI) as predictor of f0 max change.
m <- nnet::multinom(change ~ extraversion,
                    data = dat |> filter(feature == "f0max", section == "Lists"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))  # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 23.153713
## final value 23.104344
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.879 2.83 0.699 0.485
## 2 decrease extraversion 0.254 0.869 -1.24 0.215
## 3 increase (Intercept) 0.0191 4.52 -0.872 0.383
## 4 increase extraversion 0.585 1.23 0.280 0.780
# Multinomial logit: openness (BFI) as predictor of f0 max change.
m <- nnet::multinom(change ~ openness,
                    data = dat |> filter(feature == "f0max", section == "Lists"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))  # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 23.881954
## final value 23.878063
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.274 1.96 -0.496 0.620
## 2 decrease openness 0.455 0.552 -0.328 0.743
## 3 increase (Intercept) 0.253 3.06 -0.354 0.723
## 4 increase openness 0.383 0.910 -0.524 0.600
# Multinomial logit: agreeableness (BFI) as predictor of f0 max change.
m <- nnet::multinom(change ~ agreeableness,
                    data = dat |> filter(feature == "f0max", section == "Lists"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))  # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 22.303008
## iter 20 value 22.293775
## final value 22.293680
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.989 3.52 1.29 0.196
## 2 decrease agreeableness 0.135 1.09 -1.70 0.0895
## 3 increase (Intercept) 0.214 5.17 -0.251 0.802
## 4 increase agreeableness 0.400 1.49 -0.272 0.786
# Multinomial logit: conscientiousness (BFI) as predictor of f0 max change.
m <- nnet::multinom(change ~ conscientiousness,
                    data = dat |> filter(feature == "f0max", section == "Lists"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))  # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 23.875699
## iter 20 value 23.856798
## iter 30 value 23.855926
## final value 23.855899
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.538 2.97 0.0518 0.959
## 2 decrease conscientiousness 0.391 0.750 -0.593 0.553
## 3 increase (Intercept) 0.0247 5.72 -0.642 0.521
## 4 increase conscientiousness 0.559 1.38 0.172 0.863
# Multinomial logit: neuroticism (BFI) as predictor of f0 max change.
m <- nnet::multinom(change ~ neuroticism,
                    data = dat |> filter(feature == "f0max", section == "Lists"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))  # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 23.325217
## iter 20 value 23.257297
## final value 23.257293
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.0186 2.28 -1.74 0.0817
## 2 decrease neuroticism 0.673 0.661 1.09 0.274
## 3 increase (Intercept) 0.00627 3.72 -1.36 0.173
## 4 increase neuroticism 0.673 1.07 0.678 0.498
No effect of forehead temperature change on f0 max change
# Stacked counts of f0 max change by forehead temperature change (Lists).
ggplot(data = ts |> filter(section == "Lists", feature == "f0max",
                           !is.na(Forehead)),
       aes(x = Forehead, fill = change)) +
  geom_bar() +
  scale_fill_manual(values = c("f0Decrease" = "lightblue",
                               "f0Increase" = "red",
                               "f0NS" = "gray")) +
  labs(title = "Forehead temp. change and f0 max change")
# Multinomial logit: forehead temperature change as predictor of f0 max change.
m <- nnet::multinom(change ~ Forehead,
                    data = ts |> filter(feature == "f0max", section == "Lists",
                                        !is.na(Forehead)))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))  # log odds -> probabilities
## # weights: 12 (6 variable)
## initial value 37.352818
## iter 10 value 17.414964
## iter 20 value 17.388859
## final value 17.386627
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 f0Decrease (Intercept) 0.167 0.775 -2.08 0.0377
## 2 f0Decrease ForeheadtempDecrease 0.000729 82.8 -0.0872 0.930
## 3 f0Decrease ForeheadtempIncrease 0.469 0.996 -0.126 0.900
## 4 f0Increase (Intercept) 0.0909 1.05 -2.20 0.0281
## 5 f0Increase ForeheadtempDecrease 0.0000754 364. -0.0261 0.979
## 6 f0Increase ForeheadtempIncrease 0.0000561 102. -0.0956 0.924
# Estimated marginal probabilities and Tukey-adjusted pairwise contrasts of the
# forehead temperature categories within each f0 change category, from the
# model `m` fitted just above.
emmeans(m, pairwise~Forehead, by="change")
## $emmeans
## change = f0NS:
## Forehead prob SE df lower.CL upper.CL
## tempNS 7.69e-01 0.116852 6 0.48332 1.05517
## tempDecrease 1.00e+00 0.012383 6 0.96955 1.03015
## tempIncrease 8.50e-01 0.079846 6 0.65461 1.04536
##
## change = f0Decrease:
## Forehead prob SE df lower.CL upper.CL
## tempNS 1.54e-01 0.100066 6 -0.09102 0.39869
## tempDecrease 1.46e-04 0.012074 6 -0.02940 0.02969
## tempIncrease 1.50e-01 0.079845 6 -0.04537 0.34538
##
## change = f0Increase:
## Forehead prob SE df lower.CL upper.CL
## tempNS 7.69e-02 0.073903 6 -0.10392 0.25775
## tempDecrease 7.50e-06 0.002746 6 -0.00671 0.00673
## tempIncrease 4.80e-06 0.000488 6 -0.00119 0.00120
##
## Confidence level used: 0.95
##
## $contrasts
## change = f0NS:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease -2.31e-01 0.11751 6 -1.962 0.2023
## tempNS - tempIncrease -8.07e-02 0.14153 6 -0.571 0.8402
## tempDecrease - tempIncrease 1.50e-01 0.08080 6 1.855 0.2315
##
## change = f0Decrease:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 1.54e-01 0.10079 6 1.525 0.3453
## tempNS - tempIncrease 3.83e-03 0.12802 6 0.030 0.9995
## tempDecrease - tempIncrease -1.50e-01 0.08075 6 -1.856 0.2312
##
## change = f0Increase:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 7.69e-02 0.07395 6 1.040 0.5812
## tempNS - tempIncrease 7.69e-02 0.07390 6 1.041 0.5808
## tempDecrease - tempIncrease 2.78e-06 0.00279 6 0.001 1.0000
##
## P value adjustment: tukey method for comparing a family of 3 estimates
No effect of nose temperature change on f0 max change
# Stacked bar chart: counts of f0 max change categories within each nose
# temperature change category (Lists section; rows without a Nose value dropped).
ggplot(ts |> filter(!is.na(Nose), section=="Lists", feature=="f0max"), aes(Nose, fill=change))+
  geom_bar()+
  ggtitle("Nose temp. change and f0 max change")+
  scale_fill_manual(values = c("f0NS" = "gray", "f0Decrease" = "lightblue", "f0Increase" = "red"))
# Multinomial logistic regression: nose temperature change category as a
# predictor of f0 max change category (Lists section).
tidy(m <- nnet::multinom(change ~ Nose, data=ts |> filter(feature=="f0max", section=="Lists", !is.na(Nose)))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 12 (6 variable)
## initial value 37.352818
## iter 10 value 15.423706
## iter 20 value 15.382501
## iter 30 value 15.378962
## iter 30 value 15.378961
## iter 30 value 15.378961
## final value 15.378961
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 f0Decrease (Intercept) 0.0909 1.05 -2.20 0.0281
## 2 f0Decrease NosetempDecrease 0.0000403 288. -0.0352 0.972
## 3 f0Decrease NosetempIncrease 0.727 1.19 0.824 0.410
## 4 f0Increase (Intercept) 0.0000175 75.6 -0.145 0.885
## 5 f0Increase NosetempDecrease 1.00 75.6 0.130 0.896
## 6 f0Increase NosetempIncrease 0.0926 208. -0.0110 0.991
# Marginal probabilities and Tukey-adjusted pairwise contrasts of nose
# temperature categories within each f0 change category (model `m` from above).
emmeans(m, pairwise~Nose, by="change")
## $emmeans
## change = f0NS:
## Nose prob SE df lower.CL upper.CL
## tempNS 9.09e-01 0.086685 6 0.696966 1.121186
## tempDecrease 7.50e-01 0.216499 6 0.220270 1.279779
## tempIncrease 7.89e-01 0.093529 6 0.560616 1.018330
##
## change = f0Decrease:
## Nose prob SE df lower.CL upper.CL
## tempNS 9.09e-02 0.086678 6 -0.121185 0.303001
## tempDecrease 3.00e-06 0.000869 6 -0.002124 0.002130
## tempIncrease 2.11e-01 0.093529 6 -0.018331 0.439383
##
## change = f0Increase:
## Nose prob SE df lower.CL upper.CL
## tempNS 1.59e-05 0.001202 6 -0.002925 0.002957
## tempDecrease 2.50e-01 0.216498 6 -0.279780 0.779725
## tempIncrease 1.40e-06 0.000272 6 -0.000665 0.000668
##
## Confidence level used: 0.95
##
## $contrasts
## change = f0NS:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 1.59e-01 0.23321 6 0.682 0.7821
## tempNS - tempIncrease 1.20e-01 0.12752 6 0.938 0.6385
## tempDecrease - tempIncrease -3.94e-02 0.23584 6 -0.167 0.9847
##
## change = f0Decrease:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 9.09e-02 0.08668 6 1.049 0.5764
## tempNS - tempIncrease -1.20e-01 0.12752 6 -0.938 0.6384
## tempDecrease - tempIncrease -2.11e-01 0.09353 6 -2.251 0.1403
##
## change = f0Increase:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease -2.50e-01 0.21650 6 -1.155 0.5191
## tempNS - tempIncrease 1.45e-05 0.00123 6 0.012 0.9999
## tempDecrease - tempIncrease 2.50e-01 0.21650 6 1.155 0.5191
##
## P value adjustment: tukey method for comparing a family of 3 estimates
No effect of eye temperature change on f0 max change
# Stacked bar chart: counts of f0 max change categories within each eye
# temperature change category (Lists section; rows without an Eyes value dropped).
ggplot(ts |> filter(!is.na(Eyes), section=="Lists", feature=="f0max"), aes(Eyes, fill=change))+
  geom_bar()+
  ggtitle("Eye temp. change and f0 max change")+
  scale_fill_manual(values = c("f0NS" = "gray", "f0Decrease" = "lightblue", "f0Increase" = "red"))
# Multinomial logistic regression: eye temperature change category as a
# predictor of f0 max change category (Lists section).
tidy(m <- nnet::multinom(change ~ Eyes, data=ts |> filter(feature=="f0max", section=="Lists", !is.na(Eyes)))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 12 (6 variable)
## initial value 31.859756
## iter 10 value 15.760813
## iter 20 value 15.675517
## iter 30 value 15.666739
## iter 40 value 15.666431
## final value 15.666394
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 f0Decrease (Intercept) 0.0833 1.04 -2.30 0.0217
## 2 f0Decrease EyestempDecrease 0.000551 141. -0.0531 0.958
## 3 f0Decrease EyestempIncrease 0.800 1.20 1.16 0.247
## 4 f0Increase (Intercept) 0.0833 1.04 -2.30 0.0217
## 5 f0Increase EyestempDecrease 0.000527 144. -0.0522 0.958
## 6 f0Increase EyestempIncrease 0.0000750 115. -0.0823 0.934
# Marginal probabilities and Tukey-adjusted pairwise contrasts of eye
# temperature categories within each f0 change category (model `m` from above).
emmeans(m, pairwise~Eyes, by="change") # isn't working (I'm thinking maybe because "eyes" have lots of missing values)
## $emmeans
## change = f0NS:
## Eyes prob SE df lower.CL upper.CL
## tempNS 8.46e-01 0.100065 6 0.60132 1.09102
## tempDecrease 1.00e+00 0.009900 6 0.97568 1.02413
## tempIncrease 7.33e-01 0.114181 6 0.45394 1.01272
##
## change = f0Decrease:
## Eyes prob SE df lower.CL upper.CL
## tempNS 7.69e-02 0.073903 6 -0.10392 0.25775
## tempDecrease 5.01e-05 0.007078 6 -0.01727 0.01737
## tempIncrease 2.67e-01 0.114180 6 -0.01272 0.54606
##
## change = f0Increase:
## Eyes prob SE df lower.CL upper.CL
## tempNS 7.69e-02 0.073901 6 -0.10392 0.25774
## tempDecrease 4.79e-05 0.006922 6 -0.01689 0.01699
## tempIncrease 5.00e-06 0.000577 6 -0.00141 0.00142
##
## Confidence level used: 0.95
##
## $contrasts
## change = f0NS:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease -1.54e-01 0.10055 6 -1.529 0.3437
## tempNS - tempIncrease 1.13e-01 0.15182 6 0.743 0.7485
## tempDecrease - tempIncrease 2.67e-01 0.11461 6 2.326 0.1275
##
## change = f0Decrease:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 7.69e-02 0.07424 6 1.035 0.5838
## tempNS - tempIncrease -1.90e-01 0.13601 6 -1.395 0.4008
## tempDecrease - tempIncrease -2.67e-01 0.11440 6 -2.331 0.1267
##
## change = f0Increase:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 7.69e-02 0.07422 6 1.036 0.5837
## tempNS - tempIncrease 7.69e-02 0.07390 6 1.041 0.5808
## tempDecrease - tempIncrease 4.29e-05 0.00695 6 0.006 1.0000
##
## P value adjustment: tukey method for comparing a family of 3 estimates
No effect of cheek temperature change on f0 max change
# Stacked bar chart: counts of f0 max change categories within each cheek
# temperature change category (Lists section; rows without a Cheeks value dropped).
ggplot(ts |> filter(!is.na(Cheeks), section=="Lists", feature=="f0max"), aes(Cheeks, fill=change))+
  geom_bar()+
  ggtitle("Cheek temp. change and f0 max change")+
  scale_fill_manual(values = c("f0NS" = "gray", "f0Decrease" = "lightblue", "f0Increase" = "red"))
# Multinomial logistic regression: cheek temperature change category as a
# predictor of f0 max change category (Lists section).
tidy(m <- nnet::multinom(change ~ Cheeks, data=ts |> filter(feature=="f0max", section=="Lists", !is.na(Cheeks)))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 12 (6 variable)
## initial value 37.352818
## iter 10 value 17.414929
## iter 20 value 17.378473
## final value 17.375169
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 f0Decrease (Intercept) 0.136 0.621 -2.97 0.00297
## 2 f0Decrease CheekstempDecrease 0.000355 77.1 -0.103 0.918
## 3 f0Decrease CheekstempIncrease 0.679 1.03 0.729 0.466
## 4 f0Increase (Intercept) 0.0500 1.03 -2.87 0.00411
## 5 f0Increase CheekstempDecrease 0.0000915 263. -0.0353 0.972
## 6 f0Increase CheekstempIncrease 0.000366 93.0 -0.0851 0.932
# Marginal probabilities and Tukey-adjusted pairwise contrasts of cheek
# temperature categories within each f0 change category (model `m` from above).
emmeans(m, pairwise~Cheeks, by="change")
## $emmeans
## change = f0NS:
## Cheeks prob SE df lower.CL upper.CL
## tempNS 8.26e-01 0.07903 6 0.63274 1.0195
## tempDecrease 1.00e+00 0.00451 6 0.98891 1.0110
## tempIncrease 7.50e-01 0.15310 6 0.37536 1.1246
##
## change = f0Decrease:
## Cheeks prob SE df lower.CL upper.CL
## tempNS 1.30e-01 0.07022 6 -0.04140 0.3022
## tempDecrease 5.61e-05 0.00432 6 -0.01052 0.0106
## tempIncrease 2.50e-01 0.15310 6 -0.12460 0.6246
##
## change = f0Increase:
## Cheeks prob SE df lower.CL upper.CL
## tempNS 4.35e-02 0.04252 6 -0.06057 0.1475
## tempDecrease 4.80e-06 0.00127 6 -0.00309 0.0031
## tempIncrease 1.45e-05 0.00134 6 -0.00328 0.0033
##
## Confidence level used: 0.95
##
## $contrasts
## change = f0NS:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease -1.74e-01 0.07916 6 -2.196 0.1505
## tempNS - tempIncrease 7.61e-02 0.17229 6 0.442 0.8997
## tempDecrease - tempIncrease 2.50e-01 0.15316 6 1.632 0.3041
##
## change = f0Decrease:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 1.30e-01 0.07035 6 1.853 0.2320
## tempNS - tempIncrease -1.20e-01 0.16843 6 -0.710 0.7668
## tempDecrease - tempIncrease -2.50e-01 0.15316 6 -1.632 0.3041
##
## change = f0Increase:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 4.35e-02 0.04254 6 1.022 0.5913
## tempNS - tempIncrease 4.35e-02 0.04254 6 1.021 0.5915
## tempDecrease - tempIncrease -9.65e-06 0.00185 6 -0.005 1.0000
##
## P value adjustment: tukey method for comparing a family of 3 estimates
No effect of condition
# Stacked bar chart: counts of f0 SD change categories per condition
# (close vs. impersonal) in the Lists section.
ggplot(dat |> filter(section=="Lists", feature=="f0SD"), aes(condition, fill=change))+
  geom_bar()+
  ggtitle("f0 SD (first part of exp - Lists)")+
  scale_fill_manual(values = c("ns" = "gray", "decrease" = "lightblue", "increase" = "red"))
# Multinomial logistic regression: does condition predict the direction of
# f0 SD change in the Lists section?
tidy(m <- nnet::multinom(change ~ condition, data=dat |> filter(feature=="f0SD", section=="Lists"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate)) # plogis transforms log odds (the output of the model) into probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 21.010425
## iter 20 value 20.999217
## final value 20.999201
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.105 0.748 -2.86 0.00420
## 2 decrease conditionimpersonal 0.766 0.914 1.30 0.195
## 3 increase (Intercept) 0.0555 1.03 -2.75 0.00590
## 4 increase conditionimpersonal 0.000413 56.3 -0.138 0.890
# Marginal probabilities and pairwise condition contrasts within each f0 SD
# change category (model `m` from above).
emmeans(m, pairwise~condition, by="change")
## $emmeans
## change = ns:
## condition prob SE df lower.CL upper.CL
## close 8.50e-01 0.079835 4 0.62838 1.07170
## impersonal 7.22e-01 0.105571 4 0.42912 1.01534
##
## change = decrease:
## condition prob SE df lower.CL upper.CL
## close 1.00e-01 0.067075 4 -0.08625 0.28621
## impersonal 2.78e-01 0.105569 4 -0.01535 0.57086
##
## change = increase:
## condition prob SE df lower.CL upper.CL
## close 5.00e-02 0.048727 4 -0.08530 0.18527
## impersonal 1.76e-05 0.000988 4 -0.00272 0.00276
##
## Confidence level used: 0.95
##
## $contrasts
## change = ns:
## contrast estimate SE df t.ratio p.value
## close - impersonal 0.128 0.1324 4 0.966 0.3889
##
## change = decrease:
## contrast estimate SE df t.ratio p.value
## close - impersonal -0.178 0.1251 4 -1.421 0.2283
##
## change = increase:
## contrast estimate SE df t.ratio p.value
## close - impersonal 0.050 0.0487 4 1.025 0.3632
No effect of perception of partner
# Multinomial logistic regression: perceived closeness to the partner as a
# predictor of f0 SD change (Lists section).
tidy(m <- nnet::multinom(change ~ closeness, data=dat |> filter(feature=="f0SD", section=="Lists"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 20.497020
## final value 20.141788
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.601 0.956 0.427 0.670
## 2 decrease closeness 0.356 0.316 -1.88 0.0599
## 3 increase (Intercept) 0.0964 2.18 -1.03 0.305
## 4 increase closeness 0.417 0.636 -0.526 0.599
# Multinomial logistic regression: perceived similarity to the partner as a
# predictor of f0 SD change (Lists section).
tidy(m <- nnet::multinom(change ~ similarity, data=dat |> filter(feature=="f0SD", section=="Lists"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 22.151231
## final value 22.151123
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.412 1.34 -0.266 0.790
## 2 decrease similarity 0.450 0.242 -0.833 0.405
## 3 increase (Intercept) 0.113 3.12 -0.662 0.508
## 4 increase similarity 0.438 0.586 -0.423 0.672
# Multinomial logistic regression: likeability of the partner as a predictor
# of f0 SD change (Lists section).
tidy(m <- nnet::multinom(change ~ likeability, data=dat |> filter(feature=="f0SD", section=="Lists"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 20.449188
## iter 20 value 20.419352
## final value 20.419310
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.934 2.06 1.29 0.199
## 2 decrease likeability 0.347 0.327 -1.93 0.0533
## 3 increase (Intercept) 0.0461 5.30 -0.572 0.567
## 4 increase likeability 0.487 0.742 -0.0710 0.943
# Multinomial logistic regression: likelihood of becoming friends with the
# partner as a predictor of f0 SD change (Lists section).
tidy(m <- nnet::multinom(change ~ becomeFriends, data=dat |> filter(feature=="f0SD", section=="Lists"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 22.350595
## final value 22.346579
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.213 1.48 -0.887 0.375
## 2 decrease becomeFriends 0.494 0.241 -0.104 0.917
## 3 increase (Intercept) 0.00178 5.34 -1.19 0.236
## 4 increase becomeFriends 0.611 0.752 0.599 0.549
f0 SD tended to increase more often when the person scored higher in neuroticism.
# Boxplot of neuroticism scores per f0 SD change category (Lists section).
# Fix: this section analyses f0 SD (see the heading above and the multinomial
# model fitted directly below, both of which use feature=="f0SD"), but the
# plot originally filtered feature=="f0max" — an apparent copy-paste slip
# from the f0 max section — so it showed the wrong feature's categories.
ggplot(dat |> filter(feature=="f0SD", section=="Lists"), aes(change, neuroticism))+
  geom_boxplot()
# Multinomial logistic regression: neuroticism as a predictor of f0 SD change
# (Lists section).
tidy(m <- nnet::multinom(change ~ neuroticism, data=dat |> filter(feature=="f0SD", section=="Lists"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 21.441714
## iter 20 value 21.440212
## final value 21.440207
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.0110 2.25 -2.00 0.0460
## 2 decrease neuroticism 0.716 0.645 1.43 0.153
## 3 increase (Intercept) 0.0233 4.61 -0.811 0.417
## 4 increase neuroticism 0.527 1.44 0.0753 0.940
# Multinomial logistic regression: extraversion as a predictor of f0 SD change
# (Lists section).
tidy(m <- nnet::multinom(change ~ extraversion, data=dat |> filter(feature=="f0SD", section=="Lists"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 20.611431
## iter 20 value 20.553190
## iter 30 value 20.550845
## final value 20.550716
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.981 2.91 1.36 0.175
## 2 decrease extraversion 0.162 0.911 -1.80 0.0720
## 3 increase (Intercept) 0.0514 6.24 -0.467 0.640
## 4 increase extraversion 0.466 1.76 -0.0784 0.938
# Multinomial logistic regression: openness as a predictor of f0 SD change
# (Lists section).
tidy(m <- nnet::multinom(change ~ openness, data=dat |> filter(feature=="f0SD", section=="Lists"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 22.166485
## final value 22.148128
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.425 1.83 -0.164 0.870
## 2 decrease openness 0.418 0.523 -0.635 0.525
## 3 increase (Intercept) 0.403 4.16 -0.0948 0.924
## 4 increase openness 0.286 1.33 -0.685 0.493
# Multinomial logistic regression: agreeableness as a predictor of f0 SD
# change (Lists section).
tidy(m <- nnet::multinom(change ~ agreeableness, data=dat |> filter(feature=="f0SD", section=="Lists"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 22.627857
## iter 20 value 22.373307
## final value 22.373224
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.400 2.89 -0.141 0.888
## 2 decrease agreeableness 0.424 0.841 -0.364 0.716
## 3 increase (Intercept) 0.00102 7.65 -0.901 0.368
## 4 increase agreeableness 0.725 2.05 0.475 0.635
# Multinomial logistic regression: conscientiousness as a predictor of f0 SD
# change (Lists section).
tidy(m <- nnet::multinom(change ~ conscientiousness, data=dat |> filter(feature=="f0SD", section=="Lists"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 21.216056
## iter 20 value 21.200688
## iter 30 value 21.199622
## iter 40 value 21.199512
## final value 21.199508
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.958 2.83 1.11 0.267
## 2 decrease conscientiousness 0.236 0.731 -1.61 0.108
## 3 increase (Intercept) 0.338 6.79 -0.0993 0.921
## 4 increase conscientiousness 0.336 1.72 -0.396 0.692
No effect of forehead temperature change on f0 SD change
# Stacked bar chart: counts of f0 SD change categories within each forehead
# temperature change category (Lists section; rows without a Forehead value dropped).
ggplot(ts |> filter(!is.na(Forehead), section=="Lists", feature=="f0SD"), aes(Forehead, fill=change))+
  geom_bar()+
  ggtitle("Forehead temp. change and f0 SD change")+
  scale_fill_manual(values = c("f0NS" = "gray", "f0Decrease" = "lightblue", "f0Increase" = "red"))
# Multinomial logistic regression: forehead temperature change category as a
# predictor of f0 SD change category (Lists section). Note the warning in the
# output: the 'f0Increase' group is empty here, so the model is binary in effect.
tidy(m <- nnet::multinom(change ~ Forehead, data=ts |> filter(feature=="f0SD", section=="Lists", !is.na(Forehead)))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## Warning in nnet::multinom(change ~ Forehead, data = filter(ts, feature == :
## group 'f0Increase' is empty
## # weights: 4 (3 variable)
## initial value 23.567004
## iter 10 value 17.072759
## iter 20 value 17.033512
## iter 30 value 17.031643
## final value 17.030886
## converged
## # A tibble: 3 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 1 (Intercept) 0.231 0.658 -1.83 0.0674
## 2 1 ForeheadtempDecrease 0.000614 73.7 -0.100 0.920
## 3 1 ForeheadtempIncrease 0.455 0.864 -0.211 0.833
# Marginal probabilities and Tukey-adjusted pairwise contrasts of forehead
# temperature categories within each f0 SD change category (model `m` from above).
emmeans(m, pairwise~Forehead, by="change")
## $emmeans
## change = f0NS:
## Forehead prob SE df lower.CL upper.CL
## tempNS 0.769247 0.1169 3 0.3974 1.1411
## tempDecrease 0.999816 0.0136 3 0.9566 1.0430
## tempIncrease 0.800001 0.0894 3 0.5154 1.0846
##
## change = f0Decrease:
## Forehead prob SE df lower.CL upper.CL
## tempNS 0.230753 0.1169 3 -0.1411 0.6026
## tempDecrease 0.000184 0.0136 3 -0.0430 0.0434
## tempIncrease 0.199999 0.0894 3 -0.0846 0.4846
##
## Confidence level used: 0.95
##
## $contrasts
## change = f0NS:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease -0.2306 0.1176 3 -1.960 0.2682
## tempNS - tempIncrease -0.0308 0.1472 3 -0.209 0.9764
## tempDecrease - tempIncrease 0.1998 0.0905 3 2.209 0.2154
##
## change = f0Decrease:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 0.2306 0.1176 3 1.960 0.2682
## tempNS - tempIncrease 0.0308 0.1472 3 0.209 0.9764
## tempDecrease - tempIncrease -0.1998 0.0905 3 -2.209 0.2154
##
## P value adjustment: tukey method for comparing a family of 3 estimates
No effect of nose temperature change on f0 SD change
# Stacked bar chart: counts of f0 SD change categories within each nose
# temperature change category (Lists section; rows without a Nose value dropped).
ggplot(ts |> filter(!is.na(Nose), section=="Lists", feature=="f0SD"), aes(Nose, fill=change))+
  geom_bar()+
  ggtitle("Nose temp. change and f0 SD change")+
  scale_fill_manual(values = c("f0NS" = "gray", "f0Decrease" = "lightblue", "f0Increase" = "red"))
# Multinomial logistic regression: nose temperature change category as a
# predictor of f0 SD change category (Lists section; 'f0Increase' group is
# empty here per the warning below).
tidy(m <- nnet::multinom(change ~ Nose, data=ts |> filter(feature=="f0SD", section=="Lists", !is.na(Nose)))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## Warning in nnet::multinom(change ~ Nose, data = filter(ts, feature == "f0SD", :
## group 'f0Increase' is empty
## # weights: 4 (3 variable)
## initial value 23.567004
## iter 10 value 16.267922
## iter 20 value 16.227023
## iter 30 value 16.224701
## iter 40 value 16.224453
## iter 50 value 16.224165
## final value 16.224118
## converged
## # A tibble: 3 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 1 (Intercept) 0.273 0.677 -1.45 0.147
## 2 1 NosetempDecrease 0.000153 66.1 -0.133 0.894
## 3 1 NosetempIncrease 0.416 0.880 -0.387 0.699
# Marginal probabilities and Tukey-adjusted pairwise contrasts of nose
# temperature categories within each f0 SD change category (model `m` from above).
emmeans(m, pairwise~Nose, by="change")
## $emmeans
## change = f0NS:
## Nose prob SE df lower.CL upper.CL
## tempNS 7.27e-01 0.13428 3 0.3000 1.1546
## tempDecrease 1.00e+00 0.00378 3 0.9879 1.0120
## tempIncrease 7.89e-01 0.09353 3 0.4918 1.0871
##
## change = f0Decrease:
## Nose prob SE df lower.CL upper.CL
## tempNS 2.73e-01 0.13428 3 -0.1546 0.7000
## tempDecrease 5.73e-05 0.00378 3 -0.0120 0.0121
## tempIncrease 2.11e-01 0.09353 3 -0.0871 0.5082
##
## Confidence level used: 0.95
##
## $contrasts
## change = f0NS:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease -0.2726 0.1343 3 -2.030 0.2521
## tempNS - tempIncrease -0.0622 0.1636 3 -0.380 0.9254
## tempDecrease - tempIncrease 0.2105 0.0936 3 2.248 0.2082
##
## change = f0Decrease:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 0.2726 0.1343 3 2.030 0.2521
## tempNS - tempIncrease 0.0622 0.1636 3 0.380 0.9254
## tempDecrease - tempIncrease -0.2105 0.0936 3 -2.248 0.2082
##
## P value adjustment: tukey method for comparing a family of 3 estimates
No effect of eye temperature change on f0 SD change. (The model
actually suggests that f0 SD decreased more often when the temperature
also decreased, but there are so few data points with a temperature
decrease that I don’t think this finding is robust or meaningful.)
# Stacked bar chart: counts of f0 SD change categories within each eye
# temperature change category (Lists section; rows without an Eyes value dropped).
ggplot(ts |> filter(!is.na(Eyes), section=="Lists", feature=="f0SD"), aes(Eyes, fill=change))+
  geom_bar()+
  ggtitle("Eye temp. change and f0 SD change")+
  scale_fill_manual(values = c("f0NS" = "gray", "f0Decrease" = "lightblue", "f0Increase" = "red"))
# Multinomial logistic regression: eye temperature change category as a
# predictor of f0 SD change category (Lists section; 'f0Increase' group is
# empty here per the warning below).
tidy(m <- nnet::multinom(change ~ Eyes, data=ts |> filter(feature=="f0SD", section=="Lists", !is.na(Eyes)))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## Warning in nnet::multinom(change ~ Eyes, data = filter(ts, feature == "f0SD", :
## group 'f0Increase' is empty
## # weights: 4 (3 variable)
## initial value 20.101268
## iter 10 value 12.252578
## iter 20 value 12.227911
## iter 30 value 12.225898
## iter 40 value 12.225215
## iter 50 value 12.224969
## iter 60 value 12.224617
## iter 70 value 12.224488
## final value 12.224478
## converged
## # A tibble: 3 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 1 (Intercept) 0.0769 1.04 -2.39 0.0170
## 2 1 EyestempDecrease 1.00 58.9 0.181 0.857
## 3 1 EyestempIncrease 0.814 1.19 1.23 0.217
# Marginal probabilities and Tukey-adjusted pairwise contrasts of eye
# temperature categories within each f0 SD change category (model `m` from above).
emmeans(m, pairwise~Eyes, by="change") # isn't working (I'm thinking maybe because "eyes" have lots of missing values)
## $emmeans
## change = f0NS:
## Eyes prob SE df lower.CL upper.CL
## tempNS 0.923064 0.0739 3 0.6878 1.1583
## tempDecrease 0.000289 0.0170 3 -0.0538 0.0543
## tempIncrease 0.733334 0.1142 3 0.3700 1.0967
##
## change = f0Decrease:
## Eyes prob SE df lower.CL upper.CL
## tempNS 0.076936 0.0739 3 -0.1583 0.3122
## tempDecrease 0.999711 0.0170 3 0.9457 1.0538
## tempIncrease 0.266666 0.1142 3 -0.0967 0.6300
##
## Confidence level used: 0.95
##
## $contrasts
## change = f0NS:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 0.923 0.0758 3 12.168 0.0025
## tempNS - tempIncrease 0.190 0.1360 3 1.395 0.4463
## tempDecrease - tempIncrease -0.733 0.1154 3 -6.350 0.0161
##
## change = f0Decrease:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease -0.923 0.0758 3 -12.168 0.0025
## tempNS - tempIncrease -0.190 0.1360 3 -1.395 0.4463
## tempDecrease - tempIncrease 0.733 0.1154 3 6.350 0.0161
##
## P value adjustment: tukey method for comparing a family of 3 estimates
No effect of cheek temperature change on f0 SD change
# Stacked bar chart: counts of f0 SD change categories within each cheek
# temperature change category (Lists section; rows without a Cheeks value dropped).
ggplot(ts |> filter(!is.na(Cheeks), section=="Lists", feature=="f0SD"), aes(Cheeks, fill=change))+
  geom_bar()+
  ggtitle("Cheek temp. change and f0 SD change")+
  scale_fill_manual(values = c("f0NS" = "gray", "f0Decrease" = "lightblue", "f0Increase" = "red"))
# Multinomial logistic regression: cheek temperature change category as a
# predictor of f0 SD change category (Lists section; 'f0Increase' group is
# empty here per the warning below).
tidy(m <- nnet::multinom(change ~ Cheeks, data=ts |> filter(feature=="f0SD", section=="Lists", !is.na(Cheeks)))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## Warning in nnet::multinom(change ~ Cheeks, data = filter(ts, feature == : group
## 'f0Increase' is empty
## # weights: 4 (3 variable)
## initial value 23.567004
## iter 10 value 15.940650
## iter 20 value 15.920784
## iter 30 value 15.920036
## iter 40 value 15.919828
## final value 15.919670
## converged
## # A tibble: 3 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 1 (Intercept) 0.174 0.550 -2.83 0.00462
## 2 1 CheekstempDecrease 0.000499 56.3 -0.135 0.893
## 3 1 CheekstempIncrease 0.740 0.914 1.15 0.252
# Marginal probabilities and Tukey-adjusted pairwise contrasts of cheek
# temperature categories within each f0 SD change category (model `m` from above).
emmeans(m, pairwise~Cheeks, by="change")
## $emmeans
## change = f0NS:
## Cheeks prob SE df lower.CL upper.CL
## tempNS 0.826097 0.07903 3 0.5746 1.0776
## tempDecrease 0.999895 0.00592 3 0.9811 1.0187
## tempIncrease 0.624995 0.17116 3 0.0803 1.1697
##
## change = f0Decrease:
## Cheeks prob SE df lower.CL upper.CL
## tempNS 0.173903 0.07903 3 -0.0776 0.4254
## tempDecrease 0.000105 0.00592 3 -0.0187 0.0189
## tempIncrease 0.375005 0.17116 3 -0.1697 0.9197
##
## Confidence level used: 0.95
##
## $contrasts
## change = f0NS:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease -0.174 0.0793 3 -2.193 0.2184
## tempNS - tempIncrease 0.201 0.1885 3 1.067 0.5928
## tempDecrease - tempIncrease 0.375 0.1713 3 2.189 0.2192
##
## change = f0Decrease:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 0.174 0.0793 3 2.193 0.2184
## tempNS - tempIncrease -0.201 0.1885 3 -1.067 0.5928
## tempDecrease - tempIncrease -0.375 0.1713 3 -2.189 0.2192
##
## P value adjustment: tukey method for comparing a family of 3 estimates
No effect of condition
# Stacked bar chart: counts of f0 median change categories per condition
# (close vs. impersonal) in the Diapix section.
ggplot(dat |> filter(section=="Diapix", feature=="f0median"), aes(condition, fill=change))+
  geom_bar()+
  ggtitle("f0 median (second part of exp - Diapix)")+
  scale_fill_manual(values = c("ns" = "gray", "decrease" = "lightblue", "increase" = "red"))
# Multinomial logistic regression: does condition predict the direction of
# f0 median change in the Diapix section?
tidy(m <- nnet::multinom(change ~ condition, data=dat |> filter(feature=="f0median", section=="Diapix"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate)) # plogis transforms log odds (the output of the model) into probabilities
## # weights: 9 (4 variable)
## initial value 39.550042
## final value 22.941612
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.143 0.764 -2.35 0.0190
## 2 decrease conditionimpersonal 0.273 1.28 -0.765 0.445
## 3 increase (Intercept) 0.250 0.577 -1.90 0.0571
## 4 increase conditionimpersonal 0.158 1.18 -1.42 0.157
# Marginal probabilities and pairwise condition contrasts within each
# f0 median change category (model `m` from above).
emmeans(m, pairwise~condition, by="change")
## $emmeans
## change = ns:
## condition prob SE df lower.CL upper.CL
## close 0.6667 0.1111 4 0.3582 0.975
## impersonal 0.8889 0.0741 4 0.6832 1.095
##
## change = decrease:
## condition prob SE df lower.CL upper.CL
## close 0.1111 0.0741 4 -0.0946 0.317
## impersonal 0.0556 0.0540 4 -0.0943 0.205
##
## change = increase:
## condition prob SE df lower.CL upper.CL
## close 0.2222 0.0980 4 -0.0498 0.494
## impersonal 0.0556 0.0540 4 -0.0943 0.205
##
## Confidence level used: 0.95
##
## $contrasts
## change = ns:
## contrast estimate SE df t.ratio p.value
## close - impersonal -0.2222 0.1335 4 -1.664 0.1714
##
## change = decrease:
## contrast estimate SE df t.ratio p.value
## close - impersonal 0.0556 0.0917 4 0.606 0.5772
##
## change = increase:
## contrast estimate SE df t.ratio p.value
## close - impersonal 0.1667 0.1119 4 1.490 0.2106
No effect of perception of partner
# Multinomial logistic regression: perceived closeness to the partner as a
# predictor of f0 median change (Diapix section).
tidy(m <- nnet::multinom(change ~ closeness, data=dat |> filter(feature=="f0median", section=="Diapix"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 39.550042
## iter 10 value 22.366200
## final value 22.363720
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.151 1.27 -1.36 0.174
## 2 decrease closeness 0.461 0.367 -0.428 0.669
## 3 increase (Intercept) 0.0199 1.48 -2.62 0.00867
## 4 increase closeness 0.623 0.286 1.75 0.0795
# Multinomial logistic regression: perceived similarity to the partner as a
# predictor of f0 median change (Diapix section).
tidy(m <- nnet::multinom(change ~ similarity, data=dat |> filter(feature=="f0median", section=="Diapix"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 39.550042
## iter 10 value 24.118930
## final value 24.118928
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.0413 2.15 -1.46 0.144
## 2 decrease similarity 0.540 0.347 0.456 0.648
## 3 increase (Intercept) 0.0671 1.71 -1.54 0.125
## 4 increase similarity 0.539 0.277 0.570 0.569
# Multinomial logistic regression: likeability of the partner as a predictor
# of f0 median change (Diapix section).
tidy(m <- nnet::multinom(change ~ likeability, data=dat |> filter(feature=="f0median", section=="Diapix"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 39.550042
## iter 10 value 24.140354
## iter 20 value 24.125622
## iter 20 value 24.125622
## iter 20 value 24.125622
## final value 24.125622
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.0120 3.45 -1.28 0.201
## 2 decrease likeability 0.577 0.467 0.662 0.508
## 3 increase (Intercept) 0.132 2.36 -0.799 0.424
## 4 increase likeability 0.506 0.340 0.0716 0.943
# Multinomial logistic regression: likelihood of becoming friends with the
# partner as a predictor of f0 median change (Diapix section).
tidy(m <- nnet::multinom(change ~ becomeFriends, data=dat |> filter(feature=="f0median", section=="Diapix"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 39.550042
## iter 10 value 23.489730
## final value 23.488963
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.00332 3.23 -1.76 0.0776
## 2 decrease becomeFriends 0.632 0.461 1.17 0.240
## 3 increase (Intercept) 0.0911 1.80 -1.28 0.200
## 4 increase becomeFriends 0.525 0.290 0.340 0.734
No effect of BFI scores
# Multinomial logistic regression: extraversion as a predictor of f0 median
# change (Diapix section).
tidy(m <- nnet::multinom(change ~ extraversion, data=dat |> filter(feature=="f0median", section=="Diapix"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 39.550042
## iter 10 value 23.964502
## iter 20 value 23.934691
## final value 23.934585
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.0190 3.60 -1.10 0.273
## 2 decrease extraversion 0.620 0.995 0.493 0.622
## 3 increase (Intercept) 0.0167 2.93 -1.39 0.165
## 4 increase extraversion 0.661 0.802 0.832 0.405
# Multinomial logistic regression: openness as a predictor of f0 median change
# (Diapix section).
tidy(m <- nnet::multinom(change ~ openness, data=dat |> filter(feature=="f0median", section=="Diapix"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 39.550042
## iter 10 value 23.891857
## iter 20 value 23.881221
## iter 20 value 23.881221
## final value 23.881221
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.0559 2.78 -1.02 0.308
## 2 decrease openness 0.542 0.752 0.222 0.825
## 3 increase (Intercept) 0.0187 2.53 -1.57 0.117
## 4 increase openness 0.647 0.648 0.935 0.350
# BFI agreeableness as predictor of f0-median change direction (Diapix).
tidy(m <- nnet::multinom(change ~ agreeableness, data=dat |> filter(feature=="f0median", section=="Diapix"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 39.550042
## iter 10 value 23.912500
## final value 23.903971
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.383 4.30 -0.111 0.912
## 2 decrease agreeableness 0.372 1.29 -0.406 0.685
## 3 increase (Intercept) 0.0106 3.61 -1.25 0.210
## 4 increase agreeableness 0.691 1.00 0.800 0.424
# BFI conscientiousness as predictor of f0-median change direction (Diapix).
tidy(m <- nnet::multinom(change ~ conscientiousness, data=dat |> filter(feature=="f0median", section=="Diapix"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 39.550042
## iter 10 value 24.226126
## final value 24.220072
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.00894 4.99 -0.944 0.345
## 2 decrease conscientiousness 0.647 1.19 0.509 0.611
## 3 increase (Intercept) 0.126 3.39 -0.571 0.568
## 4 increase conscientiousness 0.513 0.836 0.0642 0.949
# BFI neuroticism as predictor of f0-median change direction (Diapix).
# The decrease slope below has the smallest p of the BFI set (p ≈ .07), still ns.
tidy(m <- nnet::multinom(change ~ neuroticism, data=dat |> filter(feature=="f0median", section=="Diapix"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 39.550042
## iter 10 value 21.246273
## final value 21.240362
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.963 2.84 1.15 0.250
## 2 decrease neuroticism 0.129 1.07 -1.78 0.0745
## 3 increase (Intercept) 0.890 2.25 0.928 0.354
## 4 increase neuroticism 0.221 0.774 -1.63 0.103
No effect of forehead temperature change on f0 median change
# Bar chart: counts of f0-median change categories (decrease / increase / ns)
# within each forehead-temperature change category, Diapix section only.
ggplot(ts |> filter(section=="Diapix", feature=="f0median"), aes(Forehead, fill=change))+
geom_bar()+
scale_fill_manual(values = c("f0Decrease" = "lightblue", "f0Increase" = "red", "f0NS" = "gray"))+
ggtitle("Forehead temp. change and f0 median change")
# Multinomial logit: forehead temperature change category predicting f0-median
# change category (Diapix). The fitted model is kept in `m` for the emmeans
# contrasts that follow; plogis() puts the log-odds estimates on the (0, 1) scale.
tidy(m <- nnet::multinom(change ~ Forehead, data=ts |> filter(feature=="f0median", section=="Diapix"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 12 (6 variable)
## initial value 39.550042
## iter 10 value 23.853671
## iter 20 value 23.834187
## final value 23.832013
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 f0Decrease (Intercept) 0.0714 1.04 -2.47 0.0134
## 2 f0Decrease ForeheadtempDecrease 0.00137 97.5 -0.0676 0.946
## 3 f0Decrease ForeheadtempIncrease 0.650 1.28 0.482 0.630
## 4 f0Increase (Intercept) 0.187 0.641 -2.29 0.0220
## 5 f0Increase ForeheadtempDecrease 0.000688 79.3 -0.0918 0.927
## 6 f0Increase ForeheadtempIncrease 0.382 0.991 -0.484 0.629
# Estimated marginal probabilities and Tukey-adjusted pairwise contrasts of the
# forehead-temperature categories, computed separately within each f0-change level.
emmeans(m, pairwise~Forehead, by="change")
## $emmeans
## change = f0NS:
## Forehead prob SE df lower.CL upper.CL
## tempNS 0.764745 0.1029 6 0.5130 1.0165
## tempDecrease 0.999736 0.0162 6 0.9600 1.0395
## tempIncrease 0.777777 0.0980 6 0.5380 1.0176
##
## change = f0Decrease:
## Forehead prob SE df lower.CL upper.CL
## tempNS 0.058837 0.0571 6 -0.0808 0.1985
## tempDecrease 0.000105 0.0103 6 -0.0250 0.0252
## tempIncrease 0.111105 0.0741 6 -0.0701 0.2924
##
## change = f0Increase:
## Forehead prob SE df lower.CL upper.CL
## tempNS 0.176418 0.0924 6 -0.0498 0.4026
## tempDecrease 0.000159 0.0126 6 -0.0307 0.0310
## tempIncrease 0.111118 0.0741 6 -0.0701 0.2924
##
## Confidence level used: 0.95
##
## $contrasts
## change = f0NS:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease -0.2350 0.1041 6 -2.256 0.1393
## tempNS - tempIncrease -0.0130 0.1421 6 -0.092 0.9954
## tempDecrease - tempIncrease 0.2220 0.0993 6 2.235 0.1432
##
## change = f0Decrease:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 0.0587 0.0580 6 1.013 0.5963
## tempNS - tempIncrease -0.0523 0.0935 6 -0.559 0.8459
## tempDecrease - tempIncrease -0.1110 0.0748 6 -1.484 0.3620
##
## change = f0Increase:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 0.1763 0.0933 6 1.889 0.2218
## tempNS - tempIncrease 0.0653 0.1185 6 0.551 0.8497
## tempDecrease - tempIncrease -0.1110 0.0751 6 -1.477 0.3652
##
## P value adjustment: tukey method for comparing a family of 3 estimates
No effect of nose temperature change on f0 median change
# Bar chart: f0-median change categories by nose-temperature change (Diapix).
ggplot(ts |> filter(section=="Diapix", feature=="f0median"), aes(Nose, fill=change))+
geom_bar()+
scale_fill_manual(values = c("f0Decrease" = "lightblue", "f0Increase" = "red", "f0NS" = "gray"))+
ggtitle("Nose temp. change and f0 median change")
# Multinomial logit: nose temperature change predicting f0-median change
# category (Diapix); model stored in `m` for the emmeans call that follows.
tidy(m <- nnet::multinom(change ~ Nose, data=ts |> filter(feature=="f0median", section=="Diapix"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 12 (6 variable)
## initial value 39.550042
## iter 10 value 23.191500
## iter 20 value 23.175791
## iter 30 value 23.174501
## final value 23.174500
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 f0Decrease (Intercept) 0.125 1.07 -1.82 0.0687
## 2 f0Decrease NosetempDecrease 0.0000452 197. -0.0509 0.959
## 3 f0Decrease NosetempIncrease 0.452 1.30 -0.149 0.882
## 4 f0Increase (Intercept) 0.222 0.802 -1.56 0.118
## 5 f0Increase NosetempDecrease 0.0000985 94.3 -0.0979 0.922
## 6 f0Increase NosetempIncrease 0.382 1.02 -0.474 0.636
# Pairwise (Tukey-adjusted) contrasts of nose-temperature categories within each
# f0-median change level.
emmeans(m, pairwise~Nose, by="change")
## $emmeans
## change = f0NS:
## Nose prob SE df lower.CL upper.CL
## tempNS 7.00e-01 0.14491 6 0.34543 1.05460
## tempDecrease 1.00e+00 0.00294 6 0.99277 1.00716
## tempIncrease 7.73e-01 0.08935 6 0.55411 0.99135
##
## change = f0Decrease:
## Nose prob SE df lower.CL upper.CL
## tempNS 1.00e-01 0.09487 6 -0.13213 0.33213
## tempDecrease 6.50e-06 0.00127 6 -0.00310 0.00312
## tempIncrease 9.09e-02 0.06129 6 -0.05906 0.24089
##
## change = f0Increase:
## Nose prob SE df lower.CL upper.CL
## tempNS 2.00e-01 0.12649 6 -0.10952 0.50949
## tempDecrease 2.81e-05 0.00265 6 -0.00646 0.00652
## tempIncrease 1.36e-01 0.07316 6 -0.04267 0.31538
##
## Confidence level used: 0.95
##
## $contrasts
## change = f0NS:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease -0.29995 0.1449 6 -2.069 0.1767
## tempNS - tempIncrease -0.07272 0.1702 6 -0.427 0.9058
## tempDecrease - tempIncrease 0.22723 0.0894 6 2.542 0.0968
##
## change = f0Decrease:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 0.09999 0.0949 6 1.054 0.5735
## tempNS - tempIncrease 0.00908 0.1129 6 0.080 0.9964
## tempDecrease - tempIncrease -0.09091 0.0613 6 -1.483 0.3626
##
## change = f0Increase:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 0.19996 0.1265 6 1.581 0.3234
## tempNS - tempIncrease 0.06363 0.1461 6 0.435 0.9024
## tempDecrease - tempIncrease -0.13633 0.0732 6 -1.862 0.2294
##
## P value adjustment: tukey method for comparing a family of 3 estimates
No effect of eye temperature change on f0 median change
# Bar chart: f0-median change categories by eye-temperature change (Diapix).
# Rows with missing Eyes values are dropped explicitly before plotting.
ggplot(ts |> filter(section=="Diapix", feature=="f0median", !is.na(Eyes)), aes(Eyes, fill=change))+
geom_bar()+
scale_fill_manual(values = c("f0Decrease" = "lightblue", "f0Increase" = "red", "f0NS" = "gray"))+
ggtitle("Eye temp. change and f0 median change")
# Multinomial logit: eye temperature change predicting f0-median change category
# (Diapix); NA Eyes rows excluded, matching the plot above. Model kept in `m`.
tidy(m <- nnet::multinom(change ~ Eyes, data=ts |> filter(feature=="f0median", section=="Diapix", !is.na(Eyes)))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 12 (6 variable)
## initial value 31.859756
## iter 10 value 18.287158
## iter 20 value 18.265923
## final value 18.264576
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 f0Decrease (Intercept) 0.0000229 66.1 -0.162 0.872
## 2 f0Decrease EyestempDecrease 1.00 66.1 0.162 0.872
## 3 f0Decrease EyestempIncrease 1.00 66.1 0.136 0.892
## 4 f0Increase (Intercept) 0.167 0.775 -2.08 0.0377
## 5 f0Increase EyestempDecrease 0.00000849 767. -0.0152 0.988
## 6 f0Increase EyestempIncrease 0.476 1.09 -0.0873 0.930
# Tukey-adjusted pairwise contrasts of eye-temperature categories within each
# f0-median change level.
emmeans(m, pairwise~Eyes, by="change")
## $emmeans
## change = f0NS:
## Eyes prob SE df lower.CL upper.CL
## tempNS 8.33e-01 0.107593 6 0.57002 1.09656
## tempDecrease 5.00e-01 0.353553 6 -0.36510 1.36513
## tempIncrease 7.33e-01 0.114182 6 0.45393 1.01271
##
## change = f0Decrease:
## Eyes prob SE df lower.CL upper.CL
## tempNS 1.91e-05 0.001262 6 -0.00307 0.00311
## tempDecrease 5.00e-01 0.353553 6 -0.36513 1.36510
## tempIncrease 1.33e-01 0.087768 6 -0.08144 0.34809
##
## change = f0Increase:
## Eyes prob SE df lower.CL upper.CL
## tempNS 1.67e-01 0.107588 6 -0.09657 0.42995
## tempDecrease 8.00e-07 0.000652 6 -0.00159 0.00160
## tempIncrease 1.33e-01 0.087777 6 -0.08143 0.34814
##
## Confidence level used: 0.95
##
## $contrasts
## change = f0NS:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 0.3333 0.3696 6 0.902 0.6589
## tempNS - tempIncrease 0.1000 0.1569 6 0.637 0.8060
## tempDecrease - tempIncrease -0.2333 0.3715 6 -0.628 0.8108
##
## change = f0Decrease:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease -0.5000 0.3536 6 -1.414 0.3923
## tempNS - tempIncrease -0.1333 0.0878 6 -1.519 0.3478
## tempDecrease - tempIncrease 0.3667 0.3643 6 1.007 0.5998
##
## change = f0Increase:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 0.1667 0.1076 6 1.549 0.3355
## tempNS - tempIncrease 0.0333 0.1389 6 0.240 0.9689
## tempDecrease - tempIncrease -0.1334 0.0878 6 -1.519 0.3476
##
## P value adjustment: tukey method for comparing a family of 3 estimates
No effect of cheek temperature change on f0 median change
# Bar chart: f0-median change categories by cheek-temperature change (Diapix).
ggplot(ts |> filter(section=="Diapix", feature=="f0median"), aes(Cheeks, fill=change))+
geom_bar()+
scale_fill_manual(values = c("f0Decrease" = "lightblue", "f0Increase" = "red", "f0NS" = "gray"))+
ggtitle("Cheek temp. change and f0 median change")
# Multinomial logit: cheek temperature change predicting f0-median change
# category (Diapix); model stored in `m` for the emmeans call that follows.
tidy(m <- nnet::multinom(change ~ Cheeks, data=ts |> filter(feature=="f0median", section=="Diapix"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 12 (6 variable)
## initial value 39.550042
## iter 10 value 24.066376
## iter 10 value 24.066376
## iter 10 value 24.066376
## final value 24.066376
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 f0Decrease (Intercept) 0.0625 1.03 -2.62 0.00874
## 2 f0Decrease CheekstempDecrease 0.682 1.49 0.513 0.608
## 3 f0Decrease CheekstempIncrease 0.714 1.49 0.613 0.540
## 4 f0Increase (Intercept) 0.167 0.632 -2.54 0.0109
## 5 f0Increase CheekstempDecrease 0.417 1.24 -0.271 0.786
## 6 f0Increase CheekstempIncrease 0.455 1.25 -0.146 0.884
# Tukey-adjusted pairwise contrasts of cheek-temperature categories within each
# f0-median change level.
emmeans(m, pairwise~Cheeks, by="change")
## $emmeans
## change = f0NS:
## Cheeks prob SE df lower.CL upper.CL
## tempNS 0.7895 0.0935 6 0.5606 1.018
## tempDecrease 0.7778 0.1386 6 0.4387 1.117
## tempIncrease 0.7500 0.1531 6 0.3754 1.125
##
## change = f0Decrease:
## Cheeks prob SE df lower.CL upper.CL
## tempNS 0.0526 0.0512 6 -0.0727 0.178
## tempDecrease 0.1111 0.1048 6 -0.1452 0.367
## tempIncrease 0.1250 0.1169 6 -0.1611 0.411
##
## change = f0Increase:
## Cheeks prob SE df lower.CL upper.CL
## tempNS 0.1579 0.0837 6 -0.0468 0.363
## tempDecrease 0.1111 0.1048 6 -0.1452 0.367
## tempIncrease 0.1250 0.1169 6 -0.1611 0.411
##
## Confidence level used: 0.95
##
## $contrasts
## change = f0NS:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 0.0117 0.167 6 0.070 0.9973
## tempNS - tempIncrease 0.0395 0.179 6 0.220 0.9738
## tempDecrease - tempIncrease 0.0278 0.206 6 0.135 0.9901
##
## change = f0Decrease:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease -0.0585 0.117 6 -0.501 0.8733
## tempNS - tempIncrease -0.0724 0.128 6 -0.567 0.8420
## tempDecrease - tempIncrease -0.0139 0.157 6 -0.088 0.9957
##
## change = f0Increase:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 0.0468 0.134 6 0.349 0.9358
## tempNS - tempIncrease 0.0329 0.144 6 0.229 0.9717
## tempDecrease - tempIncrease -0.0139 0.157 6 -0.088 0.9957
##
## P value adjustment: tukey method for comparing a family of 3 estimates
No effect of condition
# --- f0 max, Diapix section ---
# Bar chart: counts of f0-max change categories per experimental condition
# (close vs. impersonal).
ggplot(dat |> filter(feature=="f0max", section=="Diapix"), aes(condition, fill=change))+
geom_bar()+
scale_fill_manual(values = c("decrease" = "lightblue", "increase" = "red", "ns" = "gray"))+
ggtitle("f0 max (second part of exp - Diapix)")
# Multinomial logit: condition predicting f0-max change category (Diapix);
# model kept in `m` for the emmeans contrasts that follow.
tidy(m <- nnet::multinom(change ~ condition, data=dat |> filter(feature=="f0max", section=="Diapix"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate)) # plogis (logistic CDF) transforms log odds (the output of the model) onto the (0, 1) probability scale
## # weights: 9 (4 variable)
## initial value 39.550042
## iter 10 value 19.563952
## iter 20 value 19.554989
## iter 30 value 19.554804
## final value 19.554788
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.222 0.567 -2.21 0.0271
## 2 decrease conditionimpersonal 0.318 0.942 -0.809 0.419
## 3 increase (Intercept) 0.0000308 48.1 -0.216 0.829
## 4 increase conditionimpersonal 1.00 48.1 0.160 0.873
# Condition contrasts (close vs. impersonal) within each f0-max change level.
emmeans(m, pairwise~condition, by="change")
## $emmeans
## change = ns:
## condition prob SE df lower.CL upper.CL
## close 0.777757 0.09799 4 0.50568 1.04983
## impersonal 0.833352 0.08784 4 0.58948 1.07723
##
## change = decrease:
## condition prob SE df lower.CL upper.CL
## close 0.222219 0.09799 4 -0.04985 0.49428
## impersonal 0.111107 0.07407 4 -0.09455 0.31677
##
## change = increase:
## condition prob SE df lower.CL upper.CL
## close 0.000024 0.00115 4 -0.00318 0.00323
## impersonal 0.055541 0.05398 4 -0.09434 0.20542
##
## Confidence level used: 0.95
##
## $contrasts
## change = ns:
## contrast estimate SE df t.ratio p.value
## close - impersonal -0.0556 0.132 4 -0.422 0.6944
##
## change = decrease:
## contrast estimate SE df t.ratio p.value
## close - impersonal 0.1111 0.123 4 0.905 0.4169
##
## change = increase:
## contrast estimate SE df t.ratio p.value
## close - impersonal -0.0555 0.054 4 -1.028 0.3620
No effect of perception of partner
# Perceived closeness to the partner as predictor of f0-max change direction (Diapix).
tidy(m <- nnet::multinom(change ~ closeness, data=dat |> filter(feature=="f0max", section=="Diapix"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 39.550042
## iter 10 value 20.099267
## final value 19.953626
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.251 0.963 -1.14 0.256
## 2 decrease closeness 0.466 0.250 -0.542 0.588
## 3 increase (Intercept) 0.208 2.06 -0.650 0.516
## 4 increase closeness 0.324 0.871 -0.846 0.398
# Perceived similarity to the partner as predictor of f0-max change direction (Diapix).
tidy(m <- nnet::multinom(change ~ similarity, data=dat |> filter(feature=="f0max", section=="Diapix"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 39.550042
## iter 10 value 19.222474
## final value 19.218495
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.237 1.48 -0.792 0.428
## 2 decrease similarity 0.482 0.254 -0.283 0.777
## 3 increase (Intercept) 0.908 3.87 0.592 0.554
## 4 increase similarity 0.198 1.19 -1.18 0.239
# Partner likeability as predictor of f0-max change direction (Diapix).
# NOTE(review): the captured output below shows "stopped after 100 iterations"
# — this fit did NOT converge; consider refitting with a larger maxit and
# confirming the estimates are stable.
tidy(m <- nnet::multinom(change ~ likeability, data=dat |> filter(feature=="f0max", section=="Diapix"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 39.550042
## iter 10 value 17.570607
## iter 20 value 17.450132
## iter 30 value 17.394468
## iter 40 value 17.361615
## iter 50 value 17.334418
## iter 60 value 17.323287
## iter 70 value 17.310066
## iter 80 value 17.305319
## iter 90 value 17.297222
## iter 100 value 17.291670
## final value 17.291670
## stopped after 100 iterations
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.0448 2.50 -1.22 0.221
## 2 decrease likeability 0.552 0.344 0.611 0.541
## 3 increase (Intercept) 1.00 27.1 0.841 0.400
## 4 increase likeability 0.00345 6.68 -0.848 0.396
# "Become friends" rating as predictor of f0-max change direction (Diapix).
tidy(m <- nnet::multinom(change ~ becomeFriends, data=dat |> filter(feature=="f0max", section=="Diapix"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 39.550042
## iter 10 value 19.177535
## final value 19.176834
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.214 1.61 -0.809 0.419
## 2 decrease becomeFriends 0.488 0.264 -0.176 0.861
## 3 increase (Intercept) 0.856 3.40 0.525 0.600
## 4 increase becomeFriends 0.229 1.04 -1.17 0.242
No effect of BFI scores
# BFI extraversion as predictor of f0-max change direction (Diapix).
tidy(m <- nnet::multinom(change ~ extraversion, data=dat |> filter(feature=="f0max", section=="Diapix"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 39.550042
## iter 10 value 19.805220
## iter 20 value 19.794081
## final value 19.793509
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.840 2.74 0.604 0.546
## 2 decrease extraversion 0.276 0.835 -1.16 0.247
## 3 increase (Intercept) 0.319 6.03 -0.126 0.900
## 4 increase extraversion 0.316 1.83 -0.423 0.672
# BFI openness as predictor of f0-max change direction (Diapix).
# NOTE(review): output below shows "stopped after 100 iterations" — not
# converged; consider a larger maxit before trusting these estimates.
tidy(m <- nnet::multinom(change ~ openness, data=dat |> filter(feature=="f0max", section=="Diapix"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 39.550042
## iter 10 value 18.607126
## iter 20 value 17.697111
## iter 30 value 17.661712
## iter 40 value 17.634032
## iter 50 value 17.615169
## iter 60 value 17.601253
## iter 70 value 17.589783
## iter 80 value 17.579500
## iter 90 value 17.569641
## iter 100 value 17.559319
## final value 17.559319
## stopped after 100 iterations
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.245 2.05 -0.551 0.582
## 2 decrease openness 0.470 0.562 -0.214 0.830
## 3 increase (Intercept) 1.00 39.2 0.739 0.460
## 4 increase openness 0.000000480 19.3 -0.754 0.451
# BFI agreeableness as predictor of f0-max change direction (Diapix).
tidy(m <- nnet::multinom(change ~ agreeableness, data=dat |> filter(feature=="f0max", section=="Diapix"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 39.550042
## iter 10 value 19.820397
## iter 20 value 19.602438
## iter 30 value 19.600692
## final value 19.600676
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.576 3.21 0.0951 0.924
## 2 decrease agreeableness 0.365 0.948 -0.584 0.559
## 3 increase (Intercept) 1.00 10.1 0.794 0.427
## 4 increase agreeableness 0.0250 3.49 -1.05 0.294
# BFI conscientiousness as predictor of f0-max change direction (Diapix).
tidy(m <- nnet::multinom(change ~ conscientiousness, data=dat |> filter(feature=="f0max", section=="Diapix"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 39.550042
## iter 10 value 18.939842
## iter 20 value 18.429248
## final value 18.429138
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.540 3.15 0.0513 0.959
## 2 decrease conscientiousness 0.393 0.786 -0.551 0.582
## 3 increase (Intercept) 1.00 8.46 1.18 0.240
## 4 increase conscientiousness 0.0186 2.95 -1.34 0.179
# BFI neuroticism as predictor of f0-max change direction (Diapix).
tidy(m <- nnet::multinom(change ~ neuroticism, data=dat |> filter(feature=="f0max", section=="Diapix"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 39.550042
## iter 10 value 20.073513
## iter 20 value 20.069541
## final value 20.069512
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.583 1.92 0.174 0.862
## 2 decrease neuroticism 0.349 0.630 -0.990 0.322
## 3 increase (Intercept) 0.116 4.31 -0.472 0.637
## 4 increase neuroticism 0.395 1.39 -0.307 0.759
No effect of forehead temperature change on f0 max change
# Bar chart: f0-max change categories by forehead-temperature change (Diapix).
ggplot(ts |> filter(section=="Diapix", feature=="f0max"), aes(Forehead, fill=change))+
geom_bar()+
scale_fill_manual(values = c("f0Decrease" = "lightblue", "f0Increase" = "red", "f0NS" = "gray"))+
ggtitle("Forehead temp. change and f0 max change")
# Multinomial logit: forehead temperature change predicting f0-max change
# category (Diapix); model stored in `m` for the emmeans call that follows.
tidy(m <- nnet::multinom(change ~ Forehead, data=ts |> filter(feature=="f0max", section=="Diapix"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 12 (6 variable)
## initial value 39.550042
## iter 10 value 18.005433
## iter 20 value 17.969731
## iter 30 value 17.964401
## iter 30 value 17.964401
## iter 30 value 17.964401
## final value 17.964401
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 f0Decrease (Intercept) 0.0588 1.03 -2.69 0.00715
## 2 f0Decrease ForeheadtempDecrease 0.00183 93.3 -0.0675 0.946
## 3 f0Decrease ForeheadtempIncrease 0.870 1.16 1.64 0.102
## 4 f0Increase (Intercept) 0.0000278 47.4 -0.221 0.825
## 5 f0Increase ForeheadtempDecrease 0.300 294. -0.00289 0.998
## 6 f0Increase ForeheadtempIncrease 1.00 47.4 0.169 0.866
# Tukey-adjusted pairwise contrasts of forehead-temperature categories within
# each f0-max change level (tempDecrease vs. tempIncrease approaches p = .056 for f0NS).
emmeans(m, pairwise~Forehead, by="change")
## $emmeans
## change = f0NS:
## Forehead prob SE df lower.CL upper.CL
## tempNS 9.41e-01 0.05708 6 0.80147 1.08082
## tempDecrease 1.00e+00 0.01126 6 0.97233 1.02741
## tempIncrease 6.67e-01 0.11111 6 0.39479 0.93855
##
## change = f0Decrease:
## Forehead prob SE df lower.CL upper.CL
## tempNS 5.88e-02 0.05707 6 -0.08082 0.19847
## tempDecrease 1.15e-04 0.01071 6 -0.02610 0.02633
## tempIncrease 2.78e-01 0.10557 6 0.01946 0.53611
##
## change = f0Increase:
## Forehead prob SE df lower.CL upper.CL
## tempNS 2.62e-05 0.00124 6 -0.00301 0.00306
## tempDecrease 1.19e-05 0.00345 6 -0.00844 0.00846
## tempIncrease 5.55e-02 0.05399 6 -0.07655 0.18764
##
## Confidence level used: 0.95
##
## $contrasts
## change = f0NS:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease -5.87e-02 0.05818 6 -1.009 0.5982
## tempNS - tempIncrease 2.74e-01 0.12492 6 2.197 0.1502
## tempDecrease - tempIncrease 3.33e-01 0.11168 6 2.984 0.0555
##
## change = f0Decrease:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 5.87e-02 0.05807 6 1.011 0.5972
## tempNS - tempIncrease -2.19e-01 0.12001 6 -1.824 0.2404
## tempDecrease - tempIncrease -2.78e-01 0.10611 6 -2.617 0.0880
##
## change = f0Increase:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 1.43e-05 0.00367 6 0.004 1.0000
## tempNS - tempIncrease -5.55e-02 0.05400 6 -1.028 0.5878
## tempDecrease - tempIncrease -5.55e-02 0.05410 6 -1.027 0.5887
##
## P value adjustment: tukey method for comparing a family of 3 estimates
No effect of nose temperature change on f0 max change
# Bar chart: f0-max change categories by nose-temperature change (Diapix).
ggplot(ts |> filter(section=="Diapix", feature=="f0max"), aes(Nose, fill=change))+
geom_bar()+
scale_fill_manual(values = c("f0Decrease" = "lightblue", "f0Increase" = "red", "f0NS" = "gray"))+
ggtitle("Nose temp. change and f0 max change")
# Multinomial logit: nose temperature change predicting f0-max change category
# (Diapix); model stored in `m` for the emmeans call that follows.
tidy(m <- nnet::multinom(change ~ Nose, data=ts |> filter(feature=="f0max", section=="Diapix"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 12 (6 variable)
## initial value 39.550042
## iter 10 value 17.962791
## iter 20 value 17.851451
## iter 30 value 17.844235
## iter 30 value 17.844235
## iter 30 value 17.844235
## final value 17.844235
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 f0Decrease (Intercept) 0.0000490 45.2 -0.220 0.826
## 2 f0Decrease NosetempDecrease 1.00 45.2 0.195 0.845
## 3 f0Decrease NosetempIncrease 1.00 45.2 0.194 0.846
## 4 f0Increase (Intercept) 0.00000771 114. -0.103 0.918
## 5 f0Increase NosetempDecrease 0.0797 716. -0.00342 0.997
## 6 f0Increase NosetempIncrease 1.00 114. 0.0790 0.937
# Tukey-adjusted pairwise contrasts of nose-temperature categories within each
# f0-max change level.
emmeans(m, pairwise~Nose, by="change")
## $emmeans
## change = f0NS:
## Nose prob SE df lower.CL upper.CL
## tempNS 0.9999433 0.0023816 6 0.994116 1.005771
## tempDecrease 0.7500406 0.2164946 6 0.220297 1.279784
## tempIncrease 0.7272845 0.0949502 6 0.494950 0.959619
##
## change = f0Decrease:
## Nose prob SE df lower.CL upper.CL
## tempNS 0.0000490 0.0022138 6 -0.005368 0.005466
## tempDecrease 0.2499589 0.2164945 6 -0.279784 0.779702
## tempIncrease 0.2272645 0.0893449 6 0.008645 0.445884
##
## change = f0Increase:
## Nose prob SE df lower.CL upper.CL
## tempNS 0.0000077 0.0008779 6 -0.002141 0.002156
## tempDecrease 0.0000005 0.0003537 6 -0.000865 0.000866
## tempIncrease 0.0454510 0.0444078 6 -0.063211 0.154113
##
## Confidence level used: 0.95
##
## $contrasts
## change = f0NS:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 2.50e-01 0.216508 6 1.154 0.5193
## tempNS - tempIncrease 2.73e-01 0.094980 6 2.871 0.0639
## tempDecrease - tempIncrease 2.28e-02 0.236401 6 0.096 0.9949
##
## change = f0Decrease:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease -2.50e-01 0.216506 6 -1.154 0.5193
## tempNS - tempIncrease -2.27e-01 0.089372 6 -2.542 0.0967
## tempDecrease - tempIncrease 2.27e-02 0.234206 6 0.097 0.9948
##
## change = f0Increase:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 7.21e-06 0.000947 6 0.008 1.0000
## tempNS - tempIncrease -4.54e-02 0.044416 6 -1.023 0.5906
## tempDecrease - tempIncrease -4.55e-02 0.044409 6 -1.023 0.5904
##
## P value adjustment: tukey method for comparing a family of 3 estimates
No effect of eye temperature change on f0 max change
# Bar chart: f0-max change categories by eye-temperature change (Diapix);
# NA Eyes rows dropped before plotting.
ggplot(ts |> filter(section=="Diapix", feature=="f0max", !is.na(Eyes)), aes(Eyes, fill=change))+
geom_bar()+
scale_fill_manual(values = c("f0Decrease" = "lightblue", "f0Increase" = "red", "f0NS" = "gray"))+
ggtitle("Eye temp. change and f0 max change")
# Multinomial logit: eye temperature change predicting f0-max change category
# (Diapix); NA Eyes rows excluded, matching the plot above. Model kept in `m`.
tidy(m <- nnet::multinom(change ~ Eyes, data=ts |> filter(feature=="f0max", section=="Diapix", !is.na(Eyes)))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 12 (6 variable)
## initial value 31.859756
## iter 10 value 15.544436
## iter 20 value 15.496500
## iter 30 value 15.492336
## iter 40 value 15.491808
## iter 40 value 15.491808
## final value 15.491808
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 f0Decrease (Intercept) 0.0833 1.04 -2.30 0.0217
## 2 f0Decrease EyestempDecrease 0.000129 206. -0.0434 0.965
## 3 f0Decrease EyestempIncrease 0.815 1.20 1.23 0.217
## 4 f0Increase (Intercept) 0.00000245 193. -0.0670 0.947
## 5 f0Increase EyestempDecrease 0.134 1167. -0.00160 0.999
## 6 f0Increase EyestempIncrease 1.00 193. 0.0551 0.956
# Tukey-adjusted pairwise contrasts of eye-temperature categories within each
# f0-max change level.
emmeans(m, pairwise~Eyes, by="change")
## $emmeans
## change = f0NS:
## Eyes prob SE df lower.CL upper.CL
## tempNS 0.9167358 0.0797555 6 0.721581 1.111890
## tempDecrease 0.9999879 0.0024629 6 0.993961 1.006014
## tempIncrease 0.6667512 0.1217084 6 0.368941 0.964561
##
## change = f0Decrease:
## Eyes prob SE df lower.CL upper.CL
## tempNS 0.0832620 0.0797545 6 -0.111890 0.278414
## tempDecrease 0.0000118 0.0024243 6 -0.005920 0.005944
## tempIncrease 0.2665892 0.1141693 6 -0.012773 0.545951
##
## change = f0Increase:
## Eyes prob SE df lower.CL upper.CL
## tempNS 0.0000022 0.0004323 6 -0.001056 0.001060
## tempDecrease 0.0000004 0.0004343 6 -0.001062 0.001063
## tempIncrease 0.0666596 0.0644029 6 -0.090929 0.224248
##
## Confidence level used: 0.95
##
## $contrasts
## change = f0NS:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease -8.33e-02 0.079794 6 -1.043 0.5794
## tempNS - tempIncrease 2.50e-01 0.145512 6 1.718 0.2740
## tempDecrease - tempIncrease 3.33e-01 0.121733 6 2.737 0.0755
##
## change = f0Decrease:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 8.33e-02 0.079791 6 1.043 0.5794
## tempNS - tempIncrease -1.83e-01 0.139267 6 -1.316 0.4375
## tempDecrease - tempIncrease -2.67e-01 0.114195 6 -2.334 0.1261
##
## change = f0Increase:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 1.90e-06 0.000613 6 0.003 1.0000
## tempNS - tempIncrease -6.67e-02 0.064404 6 -1.035 0.5840
## tempDecrease - tempIncrease -6.67e-02 0.064404 6 -1.035 0.5840
##
## P value adjustment: tukey method for comparing a family of 3 estimates
No effect of cheek temperature change on f0 max change
# Bar chart: f0-max change categories by cheek-temperature change (Diapix).
ggplot(ts |> filter(section=="Diapix", feature=="f0max"), aes(Cheeks, fill=change))+
geom_bar()+
scale_fill_manual(values = c("f0Decrease" = "lightblue", "f0Increase" = "red", "f0NS" = "gray"))+
ggtitle("Cheek temp. change and f0 max change")
# Multinomial logit: cheek temperature change predicting f0-max change category
# (Diapix); model stored in `m` for the emmeans call that follows.
tidy(m <- nnet::multinom(change ~ Cheeks, data=ts |> filter(feature=="f0max", section=="Diapix"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 12 (6 variable)
## initial value 39.550042
## iter 10 value 19.684397
## iter 20 value 19.668362
## iter 30 value 19.666080
## iter 30 value 19.666080
## iter 30 value 19.666080
## final value 19.666080
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 f0Decrease (Intercept) 0.167 0.632 -2.54 0.0109
## 2 f0Decrease CheekstempDecrease 0.385 1.23 -0.381 0.703
## 3 f0Decrease CheekstempIncrease 0.625 1.03 0.495 0.621
## 4 f0Increase (Intercept) 0.0625 1.03 -2.62 0.00874
## 5 f0Increase CheekstempDecrease 0.000208 94.9 -0.0894 0.929
## 6 f0Increase CheekstempIncrease 0.000121 144. -0.0627 0.950
# Tukey-adjusted pairwise contrasts of cheek-temperature categories within each
# f0-max change level.
emmeans(m, pairwise~Cheeks, by="change")
## $emmeans
## change = f0NS:
## Cheeks prob SE df lower.CL upper.CL
## tempNS 7.89e-01 0.09353 6 0.56063 1.01834
## tempDecrease 8.89e-01 0.10476 6 0.63255 1.14522
## tempIncrease 7.50e-01 0.15309 6 0.37539 1.12460
##
## change = f0Decrease:
## Cheeks prob SE df lower.CL upper.CL
## tempNS 1.58e-01 0.08366 6 -0.04680 0.36260
## tempDecrease 1.11e-01 0.10475 6 -0.14522 0.36742
## tempIncrease 2.50e-01 0.15309 6 -0.12461 0.62460
##
## change = f0Increase:
## Cheeks prob SE df lower.CL upper.CL
## tempNS 5.26e-02 0.05122 6 -0.07272 0.17795
## tempDecrease 1.24e-05 0.00117 6 -0.00285 0.00288
## tempIncrease 6.00e-06 0.00087 6 -0.00212 0.00213
##
## Confidence level used: 0.95
##
## $contrasts
## change = f0NS:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease -9.94e-02 0.14043 6 -0.708 0.7680
## tempNS - tempIncrease 3.95e-02 0.17940 6 0.220 0.9738
## tempDecrease - tempIncrease 1.39e-01 0.18550 6 0.749 0.7455
##
## change = f0Decrease:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 4.68e-02 0.13406 6 0.349 0.9357
## tempNS - tempIncrease -9.21e-02 0.17446 6 -0.528 0.8609
## tempDecrease - tempIncrease -1.39e-01 0.18550 6 -0.749 0.7454
##
## change = f0Increase:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 5.26e-02 0.05123 6 1.027 0.5886
## tempNS - tempIncrease 5.26e-02 0.05123 6 1.027 0.5884
## tempDecrease - tempIncrease 6.30e-06 0.00146 6 0.004 1.0000
##
## P value adjustment: tukey method for comparing a family of 3 estimates
No effect of condition
# --- f0 SD, Diapix section ---
# Bar chart: counts of f0-SD change categories per experimental condition.
ggplot(dat |> filter(feature=="f0SD", section=="Diapix"), aes(condition, fill=change))+
geom_bar()+
scale_fill_manual(values = c("decrease" = "lightblue", "increase" = "red", "ns" = "gray"))+
ggtitle("f0 SD (second part of exp - Diapix)")
# Multinomial logit: condition predicting f0-SD change category (Diapix);
# model kept in `m` for the emmeans contrasts that follow.
tidy(m <- nnet::multinom(change ~ condition, data=dat |> filter(feature=="f0SD", section=="Diapix"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate)) # plogis (logistic CDF) transforms log odds (the output of the model) onto the (0, 1) probability scale
## # weights: 9 (4 variable)
## initial value 39.550042
## iter 10 value 19.907756
## iter 20 value 19.894618
## final value 19.894576
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.167 0.632 -2.54 0.0109
## 2 decrease conditionimpersonal 0.517 0.897 0.0768 0.939
## 3 increase (Intercept) 0.0000281 48.7 -0.215 0.830
## 4 increase conditionimpersonal 1.00 48.7 0.161 0.872
# Estimated marginal probabilities plus pairwise condition contrasts within each change category
emmeans(m, pairwise~condition, by="change")
## $emmeans
## change = ns:
## condition prob SE df lower.CL upper.CL
## close 8.33e-01 0.08785 4 0.58942 1.07721
## impersonal 7.78e-01 0.09799 4 0.50574 1.04985
##
## change = decrease:
## condition prob SE df lower.CL upper.CL
## close 1.67e-01 0.08784 4 -0.07722 0.41055
## impersonal 1.67e-01 0.08784 4 -0.07722 0.41054
##
## change = increase:
## condition prob SE df lower.CL upper.CL
## close 2.34e-05 0.00114 4 -0.00314 0.00319
## impersonal 5.55e-02 0.05399 4 -0.09434 0.20543
##
## Confidence level used: 0.95
##
## $contrasts
## change = ns:
## contrast estimate SE df t.ratio p.value
## close - impersonal 5.55e-02 0.132 4 0.422 0.6948
##
## change = decrease:
## contrast estimate SE df t.ratio p.value
## close - impersonal 3.76e-06 0.124 4 0.000 1.0000
##
## change = increase:
## contrast estimate SE df t.ratio p.value
## close - impersonal -5.55e-02 0.054 4 -1.028 0.3620
No effect of perception of partner
# Multinomial logit: perceived closeness as predictor of f0 SD change (Diapix)
m <- nnet::multinom(
  change ~ closeness,
  data = dat |> filter(feature == "f0SD", section == "Diapix")
)
# Coefficients reported on the probability scale via plogis()
m |>
  tidy() |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 39.550042
## iter 10 value 20.119696
## final value 20.091585
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.151 1.01 -1.72 0.0863
## 2 decrease closeness 0.510 0.237 0.170 0.865
## 3 increase (Intercept) 0.189 2.05 -0.710 0.478
## 4 increase closeness 0.331 0.869 -0.811 0.417
# Multinomial logit: perceived similarity as predictor of f0 SD change (Diapix)
m <- nnet::multinom(
  change ~ similarity,
  data = dat |> filter(feature == "f0SD", section == "Diapix")
)
# Coefficients reported on the probability scale via plogis()
m |>
  tidy() |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 39.550042
## iter 10 value 18.087640
## iter 20 value 18.069332
## final value 18.069329
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.0151 1.98 -2.11 0.0352
## 2 decrease similarity 0.605 0.297 1.43 0.152
## 3 increase (Intercept) 0.880 3.88 0.514 0.607
## 4 increase similarity 0.205 1.20 -1.13 0.257
# Multinomial logit: likeability as predictor of f0 SD change (Diapix)
m <- nnet::multinom(
  change ~ likeability,
  data = dat |> filter(feature == "f0SD", section == "Diapix")
)
# Coefficients reported on the probability scale via plogis()
m |>
  tidy() |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 39.550042
## iter 10 value 17.635355
## iter 20 value 17.528445
## iter 30 value 17.486656
## iter 40 value 17.448163
## iter 50 value 17.437145
## iter 60 value 17.422688
## iter 70 value 17.420293
## iter 80 value 17.416758
## iter 90 value 17.415277
## iter 100 value 17.412877
## final value 17.412877
## stopped after 100 iterations
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.112 2.35 -0.883 0.377
## 2 decrease likeability 0.518 0.330 0.219 0.827
## 3 increase (Intercept) 1.00 32.5 0.743 0.458
## 4 increase likeability 0.00244 8.06 -0.746 0.456
# Multinomial logit: "become friends" rating as predictor of f0 SD change (Diapix)
m <- nnet::multinom(
  change ~ becomeFriends,
  data = dat |> filter(feature == "f0SD", section == "Diapix")
)
# Coefficients reported on the probability scale via plogis()
m |>
  tidy() |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 39.550042
## iter 10 value 18.778922
## final value 18.770108
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.0394 1.96 -1.63 0.103
## 2 decrease becomeFriends 0.565 0.298 0.878 0.380
## 3 increase (Intercept) 0.825 3.40 0.457 0.648
## 4 increase becomeFriends 0.236 1.04 -1.13 0.257
No effect of BFI scores
# Multinomial logit: BFI extraversion as predictor of f0 SD change (Diapix)
m <- nnet::multinom(
  change ~ extraversion,
  data = dat |> filter(feature == "f0SD", section == "Diapix")
)
# Coefficients reported on the probability scale via plogis()
m |>
  tidy() |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 39.550042
## iter 10 value 20.228362
## iter 20 value 20.201092
## final value 20.200886
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.0234 2.70 -1.38 0.168
## 2 decrease extraversion 0.648 0.740 0.825 0.409
## 3 increase (Intercept) 0.153 5.97 -0.286 0.775
## 4 increase extraversion 0.378 1.81 -0.275 0.783
# Multinomial logit: BFI openness as predictor of f0 SD change (Diapix)
m <- nnet::multinom(
  change ~ openness,
  data = dat |> filter(feature == "f0SD", section == "Diapix")
)
# Coefficients reported on the probability scale via plogis()
m |>
  tidy() |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 39.550042
## iter 10 value 17.297557
## iter 20 value 16.255114
## iter 30 value 16.081367
## iter 40 value 16.053187
## iter 50 value 16.001202
## iter 60 value 16.000199
## iter 70 value 15.998703
## iter 80 value 15.998309
## iter 90 value 15.997488
## iter 100 value 15.997023
## final value 15.997023
## stopped after 100 iterations
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 8.70e- 1 2.08 0.913 0.361
## 2 decrease openness 2.65e- 1 0.628 -1.62 0.104
## 3 increase (Intercept) 1 e+ 0 73.6 0.702 0.483
## 4 increase openness 8.06e-12 36.6 -0.697 0.486
# Multinomial logit: BFI agreeableness as predictor of f0 SD change (Diapix)
m <- nnet::multinom(
  change ~ agreeableness,
  data = dat |> filter(feature == "f0SD", section == "Diapix")
)
# Coefficients reported on the probability scale via plogis()
m |>
  tidy() |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 39.550042
## iter 10 value 18.994697
## iter 20 value 18.822553
## iter 30 value 18.802293
## final value 18.802293
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.00192 3.61 -1.73 0.0834
## 2 decrease agreeableness 0.789 0.986 1.34 0.180
## 3 increase (Intercept) 0.999 10.1 0.706 0.480
## 4 increase agreeableness 0.0318 3.51 -0.974 0.330
# Multinomial logit: BFI conscientiousness as predictor of f0 SD change (Diapix)
m <- nnet::multinom(
  change ~ conscientiousness,
  data = dat |> filter(feature == "f0SD", section == "Diapix")
)
# Coefficients reported on the probability scale via plogis()
m |>
  tidy() |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 39.550042
## iter 10 value 17.722737
## iter 20 value 17.357139
## iter 30 value 17.339553
## iter 40 value 17.339357
## iter 50 value 17.339324
## final value 17.339321
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.000296 4.81 -1.69 0.0914
## 2 decrease conscientiousness 0.827 1.11 1.40 0.160
## 3 increase (Intercept) 1.00 8.45 1.09 0.277
## 4 increase conscientiousness 0.0221 2.96 -1.28 0.200
# Multinomial logit: BFI neuroticism as predictor of f0 SD change (Diapix)
m <- nnet::multinom(
  change ~ neuroticism,
  data = dat |> filter(feature == "f0SD", section == "Diapix")
)
# Coefficients reported on the probability scale via plogis()
m |>
  tidy() |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 39.550042
## iter 10 value 19.998733
## iter 20 value 19.988628
## final value 19.988549
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.618 1.92 0.250 0.803
## 2 decrease neuroticism 0.338 0.634 -1.06 0.288
## 3 increase (Intercept) 0.119 4.32 -0.465 0.642
## 4 increase neuroticism 0.393 1.40 -0.313 0.754
No effect of forehead temperature change on f0 SD change
# Counts of f0 SD change categories, stacked by forehead temperature change
ggplot(ts |> filter(feature == "f0SD", section == "Diapix"),
       aes(x = Forehead, fill = change)) +
  geom_bar() +
  scale_fill_manual(values = c("f0Decrease" = "lightblue", "f0Increase" = "red", "f0NS" = "gray")) +
  ggtitle("Forehead temp. change and f0 SD change")
# Multinomial logit: forehead temperature change category predicting f0 SD change (Diapix)
m <- nnet::multinom(
  change ~ Forehead,
  data = ts |> filter(feature == "f0SD", section == "Diapix")
)
# Coefficients reported on the probability scale via plogis()
m |>
  tidy() |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))
## # weights: 12 (6 variable)
## initial value 39.550042
## iter 10 value 18.005433
## iter 20 value 17.969731
## iter 30 value 17.964401
## iter 30 value 17.964401
## iter 30 value 17.964401
## final value 17.964401
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 f0Decrease (Intercept) 0.0588 1.03 -2.69 0.00715
## 2 f0Decrease ForeheadtempDecrease 0.00183 93.3 -0.0675 0.946
## 3 f0Decrease ForeheadtempIncrease 0.870 1.16 1.64 0.102
## 4 f0Increase (Intercept) 0.0000278 47.4 -0.221 0.825
## 5 f0Increase ForeheadtempDecrease 0.300 294. -0.00289 0.998
## 6 f0Increase ForeheadtempIncrease 1.00 47.4 0.169 0.866
# Pairwise contrasts of forehead temperature-change categories within each f0-change level
emmeans(m, pairwise~Forehead, by="change")
## $emmeans
## change = f0NS:
## Forehead prob SE df lower.CL upper.CL
## tempNS 9.41e-01 0.05708 6 0.80147 1.08082
## tempDecrease 1.00e+00 0.01126 6 0.97233 1.02741
## tempIncrease 6.67e-01 0.11111 6 0.39479 0.93855
##
## change = f0Decrease:
## Forehead prob SE df lower.CL upper.CL
## tempNS 5.88e-02 0.05707 6 -0.08082 0.19847
## tempDecrease 1.15e-04 0.01071 6 -0.02610 0.02633
## tempIncrease 2.78e-01 0.10557 6 0.01946 0.53611
##
## change = f0Increase:
## Forehead prob SE df lower.CL upper.CL
## tempNS 2.62e-05 0.00124 6 -0.00301 0.00306
## tempDecrease 1.19e-05 0.00345 6 -0.00844 0.00846
## tempIncrease 5.55e-02 0.05399 6 -0.07655 0.18764
##
## Confidence level used: 0.95
##
## $contrasts
## change = f0NS:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease -5.87e-02 0.05818 6 -1.009 0.5982
## tempNS - tempIncrease 2.74e-01 0.12492 6 2.197 0.1502
## tempDecrease - tempIncrease 3.33e-01 0.11168 6 2.984 0.0555
##
## change = f0Decrease:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 5.87e-02 0.05807 6 1.011 0.5972
## tempNS - tempIncrease -2.19e-01 0.12001 6 -1.824 0.2404
## tempDecrease - tempIncrease -2.78e-01 0.10611 6 -2.617 0.0880
##
## change = f0Increase:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 1.43e-05 0.00367 6 0.004 1.0000
## tempNS - tempIncrease -5.55e-02 0.05400 6 -1.028 0.5878
## tempDecrease - tempIncrease -5.55e-02 0.05410 6 -1.027 0.5887
##
## P value adjustment: tukey method for comparing a family of 3 estimates
No effect of nose temperature change on f0 SD change
# Counts of f0 SD change categories, stacked by nose temperature change
ggplot(ts |> filter(feature == "f0SD", section == "Diapix"),
       aes(x = Nose, fill = change)) +
  geom_bar() +
  scale_fill_manual(values = c("f0Decrease" = "lightblue", "f0Increase" = "red", "f0NS" = "gray")) +
  ggtitle("Nose temp. change and f0 SD change")
# Multinomial logit: nose temperature change category predicting f0 SD change (Diapix)
m <- nnet::multinom(
  change ~ Nose,
  data = ts |> filter(feature == "f0SD", section == "Diapix")
)
# Coefficients reported on the probability scale via plogis()
m |>
  tidy() |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))
## # weights: 12 (6 variable)
## initial value 39.550042
## iter 10 value 16.659205
## iter 20 value 16.633363
## final value 16.631764
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 f0Decrease (Intercept) 0.0000119 91.8 -0.124 0.902
## 2 f0Decrease NosetempDecrease 0.0236 937. -0.00397 0.997
## 3 f0Decrease NosetempIncrease 1.00 91.8 0.114 0.910
## 4 f0Increase (Intercept) 0.00000192 228. -0.0577 0.954
## 5 f0Increase NosetempDecrease 0.0601 1446. -0.00190 0.998
## 6 f0Increase NosetempIncrease 1.00 228. 0.0458 0.963
# Pairwise contrasts of nose temperature-change categories within each f0-change level
emmeans(m, pairwise~Nose, by="change")
## $emmeans
## change = f0NS:
## Nose prob SE df lower.CL upper.CL
## tempNS 0.9999862 0.0011745 6 0.9971123 1.0028601
## tempDecrease 0.9999996 0.0003202 6 0.9992161 1.0007831
## tempIncrease 0.6818308 0.0993016 6 0.4388486 0.9248129
##
## change = f0Decrease:
## Nose prob SE df lower.CL upper.CL
## tempNS 0.0000119 0.0010898 6 -0.0026547 0.0026785
## tempDecrease 0.0000003 0.0002681 6 -0.0006557 0.0006563
## tempIncrease 0.2727315 0.0949519 6 0.0403926 0.5050705
##
## change = f0Increase:
## Nose prob SE df lower.CL upper.CL
## tempNS 0.0000019 0.0004380 6 -0.0010699 0.0010737
## tempDecrease 0.0000001 0.0001751 6 -0.0004283 0.0004285
## tempIncrease 0.0454377 0.0444016 6 -0.0632092 0.1540846
##
## Confidence level used: 0.95
##
## $contrasts
## change = f0NS:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease -1.34e-05 0.001217 6 -0.011 0.9999
## tempNS - tempIncrease 3.18e-01 0.099308 6 3.204 0.0424
## tempDecrease - tempIncrease 3.18e-01 0.099302 6 3.204 0.0423
##
## change = f0Decrease:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 1.16e-05 0.001122 6 0.010 0.9999
## tempNS - tempIncrease -2.73e-01 0.094958 6 -2.872 0.0638
## tempDecrease - tempIncrease -2.73e-01 0.094952 6 -2.872 0.0638
##
## change = f0Increase:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 1.80e-06 0.000472 6 0.004 1.0000
## tempNS - tempIncrease -4.54e-02 0.044404 6 -1.023 0.5905
## tempDecrease - tempIncrease -4.54e-02 0.044402 6 -1.023 0.5905
##
## P value adjustment: tukey method for comparing a family of 3 estimates
No effect of eye temperature change on f0 SD change
# Counts of f0 SD change categories, stacked by eye temperature change
# (rows with missing Eyes values excluded)
ggplot(ts |> filter(feature == "f0SD", section == "Diapix", !is.na(Eyes)),
       aes(x = Eyes, fill = change)) +
  geom_bar() +
  scale_fill_manual(values = c("f0Decrease" = "lightblue", "f0Increase" = "red", "f0NS" = "gray")) +
  ggtitle("Eye temp. change and f0 SD change")
# Multinomial logit: eye temperature change category predicting f0 SD change (Diapix);
# rows with missing Eyes values are excluded before fitting
m <- nnet::multinom(
  change ~ Eyes,
  data = ts |> filter(feature == "f0SD", section == "Diapix", !is.na(Eyes))
)
# Coefficients reported on the probability scale via plogis()
m |>
  tidy() |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))
## # weights: 12 (6 variable)
## initial value 31.859756
## iter 10 value 16.386908
## iter 20 value 16.357005
## iter 30 value 16.355091
## iter 30 value 16.355091
## iter 30 value 16.355090
## final value 16.355090
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 f0Decrease (Intercept) 0.167 0.775 -2.08 0.0377
## 2 f0Decrease EyestempDecrease 0.0000562 211. -0.0464 0.963
## 3 f0Decrease EyestempIncrease 0.577 1.01 0.306 0.759
## 4 f0Increase (Intercept) 0.0000258 62.3 -0.170 0.865
## 5 f0Increase EyestempDecrease 0.130 366. -0.00520 0.996
## 6 f0Increase EyestempIncrease 1.00 62.3 0.131 0.896
# Pairwise contrasts of eye temperature-change categories within each f0-change level
emmeans(m, pairwise~Eyes, by="change") # NOTE(review): not working reliably — possibly because "Eyes" has many missing values; confirm
## $emmeans
## change = f0NS:
## Eyes prob SE df lower.CL upper.CL
## tempNS 8.33e-01 0.10759 6 0.57006 1.09657
## tempDecrease 1.00e+00 0.00275 6 0.99326 1.00671
## tempIncrease 7.33e-01 0.11418 6 0.45396 1.01273
##
## change = f0Decrease:
## Eyes prob SE df lower.CL upper.CL
## tempNS 1.67e-01 0.10758 6 -0.09658 0.42991
## tempDecrease 1.12e-05 0.00237 6 -0.00579 0.00581
## tempIncrease 2.00e-01 0.10328 6 -0.05272 0.45271
##
## change = f0Increase:
## Eyes prob SE df lower.CL upper.CL
## tempNS 2.15e-05 0.00134 6 -0.00325 0.00329
## tempDecrease 3.90e-06 0.00139 6 -0.00339 0.00340
## tempIncrease 6.67e-02 0.06440 6 -0.09093 0.22424
##
## Confidence level used: 0.95
##
## $contrasts
## change = f0NS:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease -1.67e-01 0.10762 6 -1.549 0.3358
## tempNS - tempIncrease 1.00e-01 0.15688 6 0.637 0.8060
## tempDecrease - tempIncrease 2.67e-01 0.11421 6 2.335 0.1261
##
## change = f0Decrease:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 1.67e-01 0.10761 6 1.549 0.3358
## tempNS - tempIncrease -3.33e-02 0.14913 6 -0.224 0.9730
## tempDecrease - tempIncrease -2.00e-01 0.10331 6 -1.936 0.2092
##
## change = f0Increase:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 1.76e-05 0.00193 6 0.009 1.0000
## tempNS - tempIncrease -6.66e-02 0.06442 6 -1.034 0.5843
## tempDecrease - tempIncrease -6.67e-02 0.06442 6 -1.035 0.5841
##
## P value adjustment: tukey method for comparing a family of 3 estimates
No effect of cheek temperature change on f0 SD change
# Counts of f0 SD change categories, stacked by cheek temperature change
ggplot(ts |> filter(feature == "f0SD", section == "Diapix"),
       aes(x = Cheeks, fill = change)) +
  geom_bar() +
  scale_fill_manual(values = c("f0Decrease" = "lightblue", "f0Increase" = "red", "f0NS" = "gray")) +
  ggtitle("Cheek temp. change and f0 SD change")
# Multinomial logit: cheek temperature change category predicting f0 SD change (Diapix)
m <- nnet::multinom(
  change ~ Cheeks,
  data = ts |> filter(feature == "f0SD", section == "Diapix")
)
# Coefficients reported on the probability scale via plogis()
m |>
  tidy() |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))
## # weights: 12 (6 variable)
## initial value 39.550042
## iter 10 value 17.591246
## iter 20 value 17.569183
## final value 17.567114
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 f0Decrease (Intercept) 0.278 0.526 -1.82 0.0694
## 2 f0Decrease CheekstempDecrease 0.0000123 154. -0.0736 0.941
## 3 f0Decrease CheekstempIncrease 0.271 1.19 -0.831 0.406
## 4 f0Increase (Intercept) 0.0714 1.04 -2.47 0.0134
## 5 f0Increase CheekstempDecrease 0.0000958 123. -0.0753 0.940
## 6 f0Increase CheekstempIncrease 0.0000642 170. -0.0567 0.955
# Pairwise contrasts of cheek temperature-change categories within each f0-change level
emmeans(m, pairwise~Cheeks, by="change")
## $emmeans
## change = f0NS:
## Cheeks prob SE df lower.CL upper.CL
## tempNS 0.6842184 0.1066385 6 0.423283 0.945153
## tempDecrease 0.9999879 0.0011585 6 0.997153 1.002823
## tempIncrease 0.8749940 0.1169292 6 0.588879 1.161109
##
## change = f0Decrease:
## Cheeks prob SE df lower.CL upper.CL
## tempNS 0.2631532 0.1010220 6 0.015961 0.510345
## tempDecrease 0.0000047 0.0007235 6 -0.001766 0.001775
## tempIncrease 0.1250017 0.1169275 6 -0.161109 0.411113
##
## change = f0Increase:
## Cheeks prob SE df lower.CL upper.CL
## tempNS 0.0526283 0.0512263 6 -0.072718 0.177975
## tempDecrease 0.0000074 0.0009047 6 -0.002206 0.002221
## tempIncrease 0.0000043 0.0007348 6 -0.001794 0.001802
##
## Confidence level used: 0.95
##
## $contrasts
## change = f0NS:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease -3.16e-01 0.10664 6 -2.961 0.0571
## tempNS - tempIncrease -1.91e-01 0.15825 6 -1.206 0.4926
## tempDecrease - tempIncrease 1.25e-01 0.11693 6 1.069 0.5653
##
## change = f0Decrease:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 2.63e-01 0.10102 6 2.605 0.0893
## tempNS - tempIncrease 1.38e-01 0.15452 6 0.894 0.6634
## tempDecrease - tempIncrease -1.25e-01 0.11693 6 -1.069 0.5652
##
## change = f0Increase:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 5.26e-02 0.05123 6 1.027 0.5884
## tempNS - tempIncrease 5.26e-02 0.05123 6 1.027 0.5883
## tempDecrease - tempIncrease 3.05e-06 0.00117 6 0.003 1.0000
##
## P value adjustment: tukey method for comparing a family of 3 estimates
No effect of condition
# Counts of f0 median change categories (decrease/increase/ns), stacked by condition
ggplot(dat |> filter(section == "entireExp", feature == "f0median"),
       aes(x = condition, fill = change)) +
  geom_bar() +
  scale_fill_manual(values = c("decrease" = "lightblue", "increase" = "red", "ns" = "gray")) +
  ggtitle("f0 median across entire experiment")
# Multinomial logit: condition as predictor of f0 median change (entire experiment)
m <- nnet::multinom(
  change ~ condition,
  data = dat |> filter(feature == "f0median", section == "entireExp")
)
# plogis() maps the fitted log odds (model output) onto the probability scale
m |>
  tidy() |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 29.045493
## iter 20 value 29.026327
## final value 29.026326
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.0000151 68.9 -0.161 0.872
## 2 decrease conditionimpersonal 1.00 68.9 0.142 0.887
## 3 increase (Intercept) 0.300 0.488 -1.74 0.0825
## 4 increase conditionimpersonal 0.459 0.761 -0.216 0.829
# Estimated marginal probabilities plus pairwise condition contrasts within each change category
emmeans(m, pairwise~condition, by="change")
## $emmeans
## change = ns:
## condition prob SE df lower.CL upper.CL
## close 7.00e-01 0.102470 4 0.41550 0.98450
## impersonal 6.11e-01 0.114905 4 0.29207 0.93013
##
## change = decrease:
## condition prob SE df lower.CL upper.CL
## close 1.05e-05 0.000726 4 -0.00201 0.00203
## impersonal 1.67e-01 0.087841 4 -0.07722 0.41056
##
## change = increase:
## condition prob SE df lower.CL upper.CL
## close 3.00e-01 0.102469 4 0.01549 0.58449
## impersonal 2.22e-01 0.097993 4 -0.04984 0.49431
##
## Confidence level used: 0.95
##
## $contrasts
## change = ns:
## contrast estimate SE df t.ratio p.value
## close - impersonal 0.0889 0.1540 4 0.577 0.5946
##
## change = decrease:
## contrast estimate SE df t.ratio p.value
## close - impersonal -0.1667 0.0878 4 -1.897 0.1307
##
## change = increase:
## contrast estimate SE df t.ratio p.value
## close - impersonal 0.0778 0.1418 4 0.548 0.6126
No effect of perception of partner
# Multinomial logit: perceived closeness as predictor of f0 median change (entire experiment)
m <- nnet::multinom(
  change ~ closeness,
  data = dat |> filter(feature == "f0median", section == "entireExp")
)
# Coefficients reported on the probability scale via plogis()
m |>
  tidy() |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 31.209929
## final value 31.209712
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.138 1.31 -1.40 0.162
## 2 decrease closeness 0.479 0.345 -0.239 0.811
## 3 increase (Intercept) 0.205 0.865 -1.56 0.118
## 4 increase closeness 0.529 0.201 0.574 0.566
# Multinomial logit: perceived similarity as predictor of f0 median change (entire experiment)
m <- nnet::multinom(
  change ~ similarity,
  data = dat |> filter(feature == "f0median", section == "entireExp")
)
# Coefficients reported on the probability scale via plogis()
m |>
  tidy() |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 31.340790
## iter 10 value 31.340790
## final value 31.340790
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.0570 2.20 -1.28 0.202
## 2 decrease similarity 0.529 0.354 0.333 0.739
## 3 increase (Intercept) 0.339 1.23 -0.543 0.587
## 4 increase similarity 0.489 0.212 -0.211 0.833
# Multinomial logit: likeability as predictor of f0 median change (entire experiment)
m <- nnet::multinom(
  change ~ likeability,
  data = dat |> filter(feature == "f0median", section == "entireExp")
)
# Coefficients reported on the probability scale via plogis()
m |>
  tidy() |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 31.409907
## final value 31.409844
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.0737 3.08 -0.822 0.411
## 2 decrease likeability 0.515 0.432 0.137 0.891
## 3 increase (Intercept) 0.343 1.81 -0.360 0.719
## 4 increase likeability 0.490 0.260 -0.149 0.882
# Multinomial logit: "become friends" rating as predictor of f0 median change (entire experiment)
m <- nnet::multinom(
  change ~ becomeFriends,
  data = dat |> filter(feature == "f0median", section == "entireExp")
)
# Coefficients reported on the probability scale via plogis()
m |>
  tidy() |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 31.214880
## final value 31.214880
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.300 1.93 -0.440 0.660
## 2 decrease becomeFriends 0.444 0.338 -0.661 0.509
## 3 increase (Intercept) 0.347 1.34 -0.471 0.638
## 4 increase becomeFriends 0.488 0.217 -0.220 0.826
No effect of BFI scores
# Multinomial logit: BFI extraversion as predictor of f0 median change (entire experiment)
m <- nnet::multinom(
  change ~ extraversion,
  data = dat |> filter(feature == "f0median", section == "entireExp")
)
# Coefficients reported on the probability scale via plogis()
m |>
  tidy() |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 29.975297
## iter 20 value 29.848747
## iter 20 value 29.848747
## iter 20 value 29.848746
## final value 29.848746
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.00374 3.90 -1.43 0.152
## 2 decrease extraversion 0.729 1.06 0.933 0.351
## 3 increase (Intercept) 0.00872 2.46 -1.93 0.0539
## 4 increase extraversion 0.748 0.677 1.60 0.109
# Multinomial logit: BFI openness as predictor of f0 median change (entire experiment)
m <- nnet::multinom(
  change ~ openness,
  data = dat |> filter(feature == "f0median", section == "entireExp")
)
# Coefficients reported on the probability scale via plogis()
m |>
  tidy() |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 31.249348
## iter 20 value 31.240598
## iter 20 value 31.240598
## iter 20 value 31.240598
## final value 31.240598
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.171 2.63 -0.599 0.549
## 2 decrease openness 0.461 0.753 -0.210 0.833
## 3 increase (Intercept) 0.140 1.74 -1.05 0.295
## 4 increase openness 0.563 0.470 0.537 0.591
# Multinomial logit: BFI agreeableness as predictor of f0 median change (entire experiment)
m <- nnet::multinom(
  change ~ agreeableness,
  data = dat |> filter(feature == "f0median", section == "entireExp")
)
# Coefficients reported on the probability scale via plogis()
m |>
  tidy() |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 30.317732
## iter 20 value 30.175384
## iter 20 value 30.175384
## final value 30.175384
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.000395 4.87 -1.61 0.108
## 2 decrease agreeableness 0.832 1.30 1.23 0.218
## 3 increase (Intercept) 0.0186 2.74 -1.45 0.148
## 4 increase agreeableness 0.706 0.771 1.14 0.255
# Multinomial logit: BFI conscientiousness as predictor of f0 median change (entire experiment)
m <- nnet::multinom(
  change ~ conscientiousness,
  data = dat |> filter(feature == "f0median", section == "entireExp")
)
# Coefficients reported on the probability scale via plogis()
m |>
  tidy() |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 30.279952
## iter 20 value 30.188601
## final value 30.188541
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.0000121 7.59 -1.49 0.136
## 2 decrease conscientiousness 0.897 1.71 1.26 0.207
## 3 increase (Intercept) 0.487 2.60 -0.0198 0.984
## 4 increase conscientiousness 0.446 0.650 -0.335 0.738
# Multinomial logit: BFI neuroticism as predictor of f0 median change (entire experiment)
m <- nnet::multinom(
  change ~ neuroticism,
  data = dat |> filter(feature == "f0median", section == "entireExp")
)
# Coefficients reported on the probability scale via plogis()
m |>
  tidy() |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 29.638191
## final value 29.629303
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.808 2.72 0.528 0.597
## 2 decrease neuroticism 0.236 0.936 -1.25 0.210
## 3 increase (Intercept) 0.858 1.77 1.02 0.309
## 4 increase neuroticism 0.294 0.573 -1.53 0.125
No effect of forehead temperature change on f0 median change
# Counts of f0 median change categories, stacked by forehead temperature change
ggplot(ts |> filter(feature == "f0median", section == "entireExp"),
       aes(x = Forehead, fill = change)) +
  geom_bar() +
  scale_fill_manual(values = c("f0Decrease" = "lightblue", "f0Increase" = "red", "f0NS" = "gray")) +
  ggtitle("Forehead temp. change and f0 median change")
# Multinomial logit: forehead temperature change category predicting f0 median change (entire experiment)
m <- nnet::multinom(
  change ~ Forehead,
  data = ts |> filter(feature == "f0median", section == "entireExp")
)
# Coefficients reported on the probability scale via plogis()
m |>
  tidy() |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))
## # weights: 12 (6 variable)
## initial value 41.747267
## iter 10 value 29.919868
## iter 20 value 29.859819
## iter 30 value 29.855467
## iter 30 value 29.855467
## iter 30 value 29.855467
## final value 29.855467
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 f0Decrease (Intercept) 0.111 1.06 -1.96 0.0499
## 2 f0Decrease ForeheadtempDecrease 0.00144 74.6 -0.0877 0.930
## 3 f0Decrease ForeheadtempIncrease 0.500 1.30 0.000206 1.00
## 4 f0Increase (Intercept) 0.111 1.06 -1.96 0.0499
## 5 f0Increase ForeheadtempDecrease 0.000863 96.3 -0.0733 0.942
## 6 f0Increase ForeheadtempIncrease 0.818 1.14 1.32 0.187
# Pairwise contrasts of forehead temperature-change categories within each f0-change level
emmeans(m, pairwise~Forehead, by="change")
## $emmeans
## change = f0NS:
## Forehead prob SE df lower.CL upper.CL
## tempNS 0.800035 0.1265 6 0.4905 1.1095
## tempDecrease 0.999712 0.0170 6 0.9582 1.0412
## tempIncrease 0.592590 0.0946 6 0.3612 0.8240
##
## change = f0Decrease:
## Forehead prob SE df lower.CL upper.CL
## tempNS 0.099979 0.0949 6 -0.1321 0.3321
## tempDecrease 0.000180 0.0134 6 -0.0326 0.0330
## tempIncrease 0.074075 0.0504 6 -0.0493 0.1974
##
## change = f0Increase:
## Forehead prob SE df lower.CL upper.CL
## tempNS 0.099987 0.0949 6 -0.1321 0.3321
## tempDecrease 0.000108 0.0104 6 -0.0253 0.0255
## tempIncrease 0.333335 0.0907 6 0.1113 0.5553
##
## Confidence level used: 0.95
##
## $contrasts
## change = f0NS:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease -0.1997 0.1276 6 -1.565 0.3295
## tempNS - tempIncrease 0.2074 0.1579 6 1.314 0.4388
## tempDecrease - tempIncrease 0.4071 0.0961 6 4.238 0.0129
##
## change = f0Decrease:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 0.0998 0.0958 6 1.042 0.5802
## tempNS - tempIncrease 0.0259 0.1074 6 0.241 0.9686
## tempDecrease - tempIncrease -0.0739 0.0522 6 -1.417 0.3911
##
## change = f0Increase:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 0.0999 0.0954 6 1.047 0.5775
## tempNS - tempIncrease -0.2333 0.1313 6 -1.778 0.2547
## tempDecrease - tempIncrease -0.3332 0.0913 6 -3.649 0.0249
##
## P value adjustment: tukey method for comparing a family of 3 estimates
No effect of nose temperature change on f0 median change
# Counts of f0 median change categories, stacked by nose temperature change
ggplot(ts |> filter(feature == "f0median", section == "entireExp"),
       aes(x = Nose, fill = change)) +
  geom_bar() +
  scale_fill_manual(values = c("f0Decrease" = "lightblue", "f0Increase" = "red", "f0NS" = "gray")) +
  ggtitle("Nose temp. change and f0 median change")
# Multinomial logit: nose temperature change category predicting f0 median change (entire experiment)
m <- nnet::multinom(
  change ~ Nose,
  data = ts |> filter(feature == "f0median", section == "entireExp")
)
# Coefficients reported on the probability scale via plogis()
m |>
  tidy() |>
  as_tibble() |>
  mutate(estimate = plogis(estimate))
## # weights: 12 (6 variable)
## initial value 41.747267
## iter 10 value 30.047237
## iter 20 value 29.977360
## iter 30 value 29.972833
## final value 29.972828
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 f0Decrease (Intercept) 0.0000233 147. -0.0728 0.942
## 2 f0Decrease NosetempDecrease 0.00952 1230. -0.00378 0.997
## 3 f0Decrease NosetempIncrease 1.00 147. 0.0598 0.952
## 4 f0Increase (Intercept) 0.0000670 86.4 -0.111 0.911
## 5 f0Increase NosetempDecrease 1.00 86.4 0.107 0.915
## 6 f0Increase NosetempIncrease 1.00 86.4 0.101 0.920
# Pairwise contrasts of nose temperature-change categories within each f0-change level
emmeans(m, pairwise~Nose, by="change")
## $emmeans
## change = f0NS:
## Nose prob SE df lower.CL upper.CL
## tempNS 1.00e+00 0.006716 6 0.983477 1.016343
## tempDecrease 6.00e-01 0.219092 6 0.063866 1.136065
## tempIncrease 6.45e-01 0.085935 6 0.434878 0.855429
##
## change = f0Decrease:
## Nose prob SE df lower.CL upper.CL
## tempNS 2.33e-05 0.003410 6 -0.008322 0.008368
## tempDecrease 1.00e-07 0.000164 6 -0.000401 0.000401
## tempIncrease 9.68e-02 0.053101 6 -0.033157 0.226707
##
## change = f0Increase:
## Nose prob SE df lower.CL upper.CL
## tempNS 6.70e-05 0.005786 6 -0.014090 0.014224
## tempDecrease 4.00e-01 0.219092 6 -0.136065 0.936134
## tempIncrease 2.58e-01 0.078591 6 0.065767 0.450376
##
## Confidence level used: 0.95
##
## $contrasts
## change = f0NS:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 4.00e-01 0.21920 6 1.825 0.2403
## tempNS - tempIncrease 3.55e-01 0.08620 6 4.116 0.0147
## tempDecrease - tempIncrease -4.52e-02 0.23534 6 -0.192 0.9799
##
## change = f0Decrease:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 2.31e-05 0.00341 6 0.007 1.0000
## tempNS - tempIncrease -9.68e-02 0.05321 6 -1.818 0.2422
## tempDecrease - tempIncrease -9.68e-02 0.05310 6 -1.822 0.2410
##
## change = f0Increase:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease -4.00e-01 0.21917 6 -1.825 0.2402
## tempNS - tempIncrease -2.58e-01 0.07880 6 -3.274 0.0389
## tempDecrease - tempIncrease 1.42e-01 0.23276 6 0.610 0.8202
##
## P value adjustment: tukey method for comparing a family of 3 estimates
No effect of eye temperature change on f0 median change
# Counts of f0 median change categories, stacked by eye temperature change
# (rows with missing Eyes values excluded)
ggplot(ts |> filter(feature == "f0median", section == "entireExp", !is.na(Eyes)),
       aes(x = Eyes, fill = change)) +
  geom_bar() +
  scale_fill_manual(values = c("f0Decrease" = "lightblue", "f0Increase" = "red", "f0NS" = "gray")) +
  ggtitle("Eye temp. change and f0 median change")
The model is unexpectedly producing NaNs for the category
tempDecrease, even though the NA values were explicitly removed
from the dataset (and unused factor levels were dropped with droplevels()).
In any case, there are so few values in the tempNS
category that a real comparison would not have been meaningful,
as for the other models above.
# Multinomial model: does eye-temperature change predict the direction of
# f0 median change? Coefficients are reported as probabilities via plogis().
m <- nnet::multinom(change ~ Eyes,
                    data = filter(ts, feature == "f0median", section == "entireExp", !is.na(Eyes)))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate)) # log odds -> probabilities
## # weights: 12 (6 variable)
## initial value 36.254206
## iter 10 value 21.083942
## iter 20 value 21.050606
## iter 30 value 21.048274
## iter 30 value 21.048274
## iter 30 value 21.048274
## final value 21.048274
## converged
## Warning in sqrt(diag(vc)): NaNs produced
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 f0Decrease (Intercept) 0.0000161 102. -0.108 0.914
## 2 f0Decrease EyestempDecrease 0.5 NaN NaN NaN
## 3 f0Decrease EyestempIncrease 1.00 102. 0.0806 0.936
## 4 f0Increase (Intercept) 0.0000384 65.9 -0.154 0.877
## 5 f0Increase EyestempDecrease 0.5 0 NaN NaN
## 6 f0Increase EyestempIncrease 1.00 65.9 0.145 0.885
# emmeans(m, pairwise~Eyes, by="change")
No effect of cheek temperature change on f0 median change
# Stacked bar chart: counts of f0 median change direction per cheek-temperature
# change category.
# Fix: the bare drop_na() removed rows with an NA in *any* column, so the plot
# could silently lose observations that have a valid Cheeks value. Restrict the
# NA filter to the plotted variable, consistent with the !is.na(Eyes) plots.
ggplot(ts |> filter(section=="entireExp", feature=="f0median") |> drop_na(Cheeks), aes(Cheeks, fill=change))+
geom_bar()+
scale_fill_manual(values = c("f0Decrease" = "lightblue", "f0Increase" = "red", "f0NS" = "gray"))+
ggtitle("Cheek temp. change and f0 median change")
# Multinomial model: does cheek-temperature change predict the direction of
# f0 median change? Coefficients are reported as probabilities via plogis().
m <- nnet::multinom(change ~ Cheeks,
                    data = filter(ts, feature == "f0median", section == "entireExp"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate)) # log odds -> probabilities
## # weights: 12 (6 variable)
## initial value 41.747267
## iter 10 value 30.209177
## iter 20 value 30.194803
## final value 30.193839
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 f0Decrease (Intercept) 0.182 0.782 -1.92 0.0543
## 2 f0Decrease CheekstempDecrease 0.0000545 109. -0.0903 0.928
## 3 f0Decrease CheekstempIncrease 0.333 1.31 -0.528 0.597
## 4 f0Increase (Intercept) 0.357 0.558 -1.05 0.292
## 5 f0Increase CheekstempDecrease 0.340 0.977 -0.681 0.496
## 6 f0Increase CheekstempIncrease 0.375 0.869 -0.588 0.557
emmeans(m, pairwise~Cheeks, by="change")
## $emmeans
## change = f0NS:
## Cheeks prob SE df lower.CL upper.CL
## tempNS 5.63e-01 0.12402 6 0.25906 0.86599
## tempDecrease 7.78e-01 0.13858 6 0.43867 1.11687
## tempIncrease 6.92e-01 0.12800 6 0.37915 1.00557
##
## change = f0Decrease:
## Cheeks prob SE df lower.CL upper.CL
## tempNS 1.25e-01 0.08267 6 -0.07733 0.32723
## tempDecrease 9.40e-06 0.00102 6 -0.00249 0.00251
## tempIncrease 7.68e-02 0.07387 6 -0.10391 0.25761
##
## change = f0Increase:
## Cheeks prob SE df lower.CL upper.CL
## tempNS 3.13e-01 0.11588 6 0.02897 0.59607
## tempDecrease 2.22e-01 0.13858 6 -0.11687 0.56131
## tempIncrease 2.31e-01 0.11686 6 -0.05515 0.51674
##
## Confidence level used: 0.95
##
## $contrasts
## change = f0NS:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease -0.21524 0.1860 6 -1.157 0.5176
## tempNS - tempIncrease -0.12983 0.1782 6 -0.728 0.7567
## tempDecrease - tempIncrease 0.08541 0.1887 6 0.453 0.8951
##
## change = f0Decrease:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 0.12494 0.0827 6 1.511 0.3508
## tempNS - tempIncrease 0.04810 0.1109 6 0.434 0.9030
## tempDecrease - tempIncrease -0.07684 0.0739 6 -1.040 0.5812
##
## change = f0Increase:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 0.09030 0.1806 6 0.500 0.8741
## tempNS - tempIncrease 0.08173 0.1646 6 0.497 0.8756
## tempDecrease - tempIncrease -0.00857 0.1813 6 -0.047 0.9988
##
## P value adjustment: tukey method for comparing a family of 3 estimates
No effect of condition
# Stacked bar chart: counts of f0 max change direction per condition.
ggplot(filter(dat, feature == "f0max", section == "entireExp"),
       aes(x = condition, fill = change)) +
  geom_bar() +
  scale_fill_manual(values = c("decrease" = "lightblue", "increase" = "red", "ns" = "gray")) +
  ggtitle("f0 max across entire experiment")
# Multinomial model: does condition predict the direction of f0 max change?
m <- nnet::multinom(change ~ condition,
                    data = filter(dat, feature == "f0max", section == "entireExp"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate)) # plogis transforms log odds (the output of the model) into probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 28.079314
## iter 20 value 28.065243
## final value 28.065230
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.389 0.483 -0.935 0.350
## 2 decrease conditionimpersonal 0.310 0.745 -1.07 0.282
## 3 increase (Intercept) 0.154 0.769 -2.22 0.0266
## 4 increase conditionimpersonal 0.000152 50.9 -0.173 0.863
emmeans(m, pairwise~condition, by="change")
## $emmeans
## change = ns:
## condition prob SE df lower.CL upper.CL
## close 5.50e-01 0.11124 4 0.24115 0.85887
## impersonal 7.78e-01 0.09799 4 0.50570 1.04984
##
## change = decrease:
## condition prob SE df lower.CL upper.CL
## close 3.50e-01 0.10665 4 0.05389 0.64612
## impersonal 2.22e-01 0.09799 4 -0.04985 0.49427
##
## change = increase:
## condition prob SE df lower.CL upper.CL
## close 1.00e-01 0.06708 4 -0.08625 0.28622
## impersonal 2.15e-05 0.00109 4 -0.00301 0.00305
##
## Confidence level used: 0.95
##
## $contrasts
## change = ns:
## contrast estimate SE df t.ratio p.value
## close - impersonal -0.228 0.1482 4 -1.536 0.1993
##
## change = decrease:
## contrast estimate SE df t.ratio p.value
## close - impersonal 0.128 0.1448 4 0.882 0.4274
##
## change = increase:
## contrast estimate SE df t.ratio p.value
## close - impersonal 0.100 0.0671 4 1.490 0.2105
No effect of perception of partner
# Multinomial model: perceived closeness as a predictor of f0 max change.
m <- nnet::multinom(change ~ closeness,
                    data = filter(dat, feature == "f0max", section == "entireExp"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate)) # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 29.859869
## final value 29.848255
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.390 0.798 -0.563 0.574
## 2 decrease closeness 0.474 0.200 -0.514 0.607
## 3 increase (Intercept) 0.0996 1.61 -1.37 0.170
## 4 increase closeness 0.478 0.406 -0.219 0.827
# Multinomial model: perceived similarity as a predictor of f0 max change.
m <- nnet::multinom(change ~ similarity,
                    data = filter(dat, feature == "f0max", section == "entireExp"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate)) # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 29.849406
## final value 29.849368
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.273 1.23 -0.800 0.424
## 2 decrease similarity 0.507 0.206 0.137 0.891
## 3 increase (Intercept) 0.193 2.26 -0.635 0.526
## 4 increase similarity 0.449 0.422 -0.487 0.626
# Multinomial model: partner likeability as a predictor of f0 max change.
m <- nnet::multinom(change ~ likeability,
                    data = filter(dat, feature == "f0max", section == "entireExp"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate)) # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 29.953332
## iter 20 value 29.909757
## iter 20 value 29.909757
## final value 29.909756
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.183 1.81 -0.828 0.408
## 2 decrease likeability 0.524 0.255 0.383 0.701
## 3 increase (Intercept) 0.0388 3.71 -0.865 0.387
## 4 increase likeability 0.525 0.520 0.190 0.849
# Multinomial model: desire to become friends as a predictor of f0 max change.
m <- nnet::multinom(change ~ becomeFriends,
                    data = filter(dat, feature == "f0max", section == "entireExp"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate)) # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 29.785106
## final value 29.784995
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.427 1.26 -0.235 0.814
## 2 decrease becomeFriends 0.478 0.207 -0.433 0.665
## 3 increase (Intercept) 0.0241 3.15 -1.18 0.240
## 4 increase becomeFriends 0.547 0.473 0.396 0.692
No effect of BFI scores
# Multinomial model: BFI extraversion as a predictor of f0 max change.
m <- nnet::multinom(change ~ extraversion,
                    data = filter(dat, feature == "f0max", section == "entireExp"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate)) # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 29.147155
## iter 20 value 29.061470
## iter 20 value 29.061470
## iter 20 value 29.061470
## final value 29.061470
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.662 2.19 0.307 0.759
## 2 decrease extraversion 0.392 0.639 -0.686 0.493
## 3 increase (Intercept) 0.000508 5.32 -1.42 0.154
## 4 increase extraversion 0.796 1.35 1.01 0.313
# Multinomial model: BFI openness as a predictor of f0 max change.
m <- nnet::multinom(change ~ openness,
                    data = filter(dat, feature == "f0max", section == "entireExp"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate)) # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 29.918257
## iter 20 value 29.909895
## iter 20 value 29.909895
## iter 20 value 29.909895
## final value 29.909895
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.332 1.61 -0.437 0.662
## 2 decrease openness 0.491 0.446 -0.0765 0.939
## 3 increase (Intercept) 0.0215 3.58 -1.07 0.286
## 4 increase openness 0.588 0.937 0.379 0.704
# Multinomial model: BFI agreeableness as a predictor of f0 max change.
m <- nnet::multinom(change ~ agreeableness,
                    data = filter(dat, feature == "f0max", section == "entireExp"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate)) # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 29.718774
## final value 29.689323
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.187 2.51 -0.585 0.559
## 2 decrease agreeableness 0.547 0.717 0.261 0.794
## 3 increase (Intercept) 0.00147 5.55 -1.18 0.240
## 4 increase agreeableness 0.754 1.49 0.751 0.453
# Multinomial model: BFI conscientiousness as a predictor of f0 max change.
m <- nnet::multinom(change ~ conscientiousness,
                    data = filter(dat, feature == "f0max", section == "entireExp"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate)) # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 29.780386
## iter 20 value 29.761308
## final value 29.761107
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.0815 2.78 -0.872 0.383
## 2 decrease conscientiousness 0.598 0.676 0.585 0.559
## 3 increase (Intercept) 0.202 4.77 -0.288 0.773
## 4 increase conscientiousness 0.427 1.21 -0.242 0.809
# Multinomial model: BFI neuroticism as a predictor of f0 max change.
m <- nnet::multinom(change ~ neuroticism,
                    data = filter(dat, feature == "f0max", section == "entireExp"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate)) # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 29.598303
## final value 29.595088
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.569 1.62 0.170 0.865
## 2 decrease neuroticism 0.413 0.510 -0.686 0.493
## 3 increase (Intercept) 0.393 3.13 -0.139 0.890
## 4 increase neuroticism 0.335 1.05 -0.655 0.513
No effect of forehead temperature change on f0 max change
# Stacked bar chart: counts of f0 max change direction per forehead-temperature
# change category.
ggplot(filter(ts, section == "entireExp", feature == "f0max"),
       aes(x = Forehead, fill = change)) +
  geom_bar() +
  scale_fill_manual(values = c("f0Decrease" = "lightblue", "f0Increase" = "red", "f0NS" = "gray")) +
  ggtitle("Forehead temp. change and f0 max change")
# Multinomial model: does forehead-temperature change predict the direction of
# f0 max change? Coefficients are reported as probabilities via plogis().
m <- nnet::multinom(change ~ Forehead,
                    data = filter(ts, feature == "f0max", section == "entireExp"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate)) # log odds -> probabilities
## # weights: 12 (6 variable)
## initial value 41.747267
## iter 10 value 29.137489
## iter 20 value 29.070581
## iter 30 value 29.066494
## iter 30 value 29.066494
## iter 30 value 29.066494
## final value 29.066494
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 f0Decrease (Intercept) 0.222 0.802 -1.56 0.118
## 2 f0Decrease ForeheadtempDecrease 0.000901 62.3 -0.112 0.910
## 3 f0Decrease ForeheadtempIncrease 0.650 0.902 0.684 0.494
## 4 f0Increase (Intercept) 0.125 1.07 -1.82 0.0687
## 5 f0Increase ForeheadtempDecrease 0.000721 98.6 -0.0734 0.941
## 6 f0Increase ForeheadtempIncrease 0.292 1.48 -0.598 0.550
emmeans(m, pairwise~Forehead, by="change")
## $emmeans
## change = f0NS:
## Forehead prob SE df lower.CL upper.CL
## tempNS 0.700027 0.1449 6 0.3454 1.0546
## tempDecrease 0.999640 0.0190 6 0.9532 1.0461
## tempIncrease 0.629618 0.0929 6 0.4022 0.8570
##
## change = f0Decrease:
## Forehead prob SE df lower.CL upper.CL
## tempNS 0.200006 0.1265 6 -0.1095 0.5095
## tempDecrease 0.000258 0.0160 6 -0.0390 0.0395
## tempIncrease 0.333353 0.0907 6 0.1114 0.5553
##
## change = f0Increase:
## Forehead prob SE df lower.CL upper.CL
## tempNS 0.099968 0.0949 6 -0.1321 0.3321
## tempDecrease 0.000103 0.0101 6 -0.0247 0.0249
## tempIncrease 0.037029 0.0363 6 -0.0519 0.1260
##
## Confidence level used: 0.95
##
## $contrasts
## change = f0NS:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease -0.2996 0.1461 6 -2.050 0.1811
## tempNS - tempIncrease 0.0704 0.1722 6 0.409 0.9132
## tempDecrease - tempIncrease 0.3700 0.0949 6 3.901 0.0187
##
## change = f0Decrease:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 0.1997 0.1275 6 1.567 0.3287
## tempNS - tempIncrease -0.1333 0.1557 6 -0.857 0.6846
## tempDecrease - tempIncrease -0.3331 0.0921 6 -3.615 0.0259
##
## change = f0Increase:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 0.0999 0.0954 6 1.047 0.5774
## tempNS - tempIncrease 0.0629 0.1016 6 0.620 0.8152
## tempDecrease - tempIncrease -0.0369 0.0377 6 -0.979 0.6155
##
## P value adjustment: tukey method for comparing a family of 3 estimates
No effect of nose temperature change on f0 max change
# Stacked bar chart: counts of f0 max change direction per nose-temperature
# change category.
ggplot(filter(ts, section == "entireExp", feature == "f0max"),
       aes(x = Nose, fill = change)) +
  geom_bar() +
  scale_fill_manual(values = c("f0Decrease" = "lightblue", "f0Increase" = "red", "f0NS" = "gray")) +
  ggtitle("Nose temp. change and f0 max change")
# Multinomial model: does nose-temperature change predict the direction of
# f0 max change? Coefficients are reported as probabilities via plogis().
m <- nnet::multinom(change ~ Nose,
                    data = filter(ts, feature == "f0max", section == "entireExp"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate)) # log odds -> probabilities
## # weights: 12 (6 variable)
## initial value 41.747267
## iter 10 value 26.766378
## iter 20 value 26.643122
## iter 30 value 26.634308
## iter 40 value 26.633175
## iter 40 value 26.633175
## final value 26.633175
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 f0Decrease (Intercept) 0.999 29.1 0.232 0.817
## 2 f0Decrease NosetempDecrease 0.000296 29.1 -0.279 0.780
## 3 f0Decrease NosetempIncrease 0.000508 29.1 -0.261 0.794
## 4 f0Increase (Intercept) 0.999 29.1 0.232 0.817
## 5 f0Increase NosetempDecrease 0.0000000210 122. -0.144 0.885
## 6 f0Increase NosetempIncrease 0.0000566 29.1 -0.336 0.737
emmeans(m, pairwise~Nose, by="change")
## $emmeans
## change = f0NS:
## Nose prob SE df lower.CL upper.CL
## tempNS 5.93e-04 0.01721 6 -0.0415 0.04271
## tempDecrease 8.00e-01 0.17888 6 0.3623 1.23772
## tempIncrease 6.77e-01 0.08396 6 0.4719 0.88282
##
## change = f0Decrease:
## Nose prob SE df lower.CL upper.CL
## tempNS 5.00e-01 0.35355 6 -0.3650 1.36519
## tempDecrease 2.00e-01 0.17888 6 -0.2377 0.63768
## tempIncrease 2.90e-01 0.08153 6 0.0909 0.48984
##
## change = f0Increase:
## Nose prob SE df lower.CL upper.CL
## tempNS 4.99e-01 0.35355 6 -0.3658 1.36445
## tempDecrease 1.41e-05 0.00168 6 -0.0041 0.00413
## tempIncrease 3.23e-02 0.03174 6 -0.0454 0.10994
##
## Confidence level used: 0.95
##
## $contrasts
## change = f0NS:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease -0.7994 0.1797 6 -4.448 0.0103
## tempNS - tempIncrease -0.6768 0.0857 6 -7.896 0.0005
## tempDecrease - tempIncrease 0.1226 0.1976 6 0.621 0.8147
##
## change = f0Decrease:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 0.3001 0.3962 6 0.757 0.7406
## tempNS - tempIncrease 0.2097 0.3628 6 0.578 0.8364
## tempDecrease - tempIncrease -0.0904 0.1966 6 -0.460 0.8920
##
## change = f0Increase:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 0.4993 0.3536 6 1.412 0.3931
## tempNS - tempIncrease 0.4671 0.3550 6 1.316 0.4378
## tempDecrease - tempIncrease -0.0323 0.0318 6 -1.015 0.5951
##
## P value adjustment: tukey method for comparing a family of 3 estimates
No effect of eye temperature change on f0 max change (but again, the model is not working even after filtering out NAs…)
# Stacked bar chart: counts of f0 max change direction per eye-temperature
# change category (rows with missing Eyes values excluded).
ggplot(filter(ts, section == "entireExp", feature == "f0max", !is.na(Eyes)),
       aes(x = Eyes, fill = change)) +
  geom_bar() +
  scale_fill_manual(values = c("f0Decrease" = "lightblue", "f0Increase" = "red", "f0NS" = "gray")) +
  ggtitle("Eye temp. change and f0 max change")
# Multinomial model: does eye-temperature change predict the direction of
# f0 max change? Coefficients are reported as probabilities via plogis().
m <- nnet::multinom(change ~ Eyes,
                    data = filter(ts, feature == "f0max", section == "entireExp", !is.na(Eyes)))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate)) # log odds -> probabilities
## # weights: 12 (6 variable)
## initial value 36.254206
## iter 10 value 25.524201
## final value 25.505825
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 f0Decrease (Intercept) 0.167 1.10e+ 0 -1.47 0.142
## 2 f0Decrease EyestempDecrease 0.5 1.05e-11 0 1
## 3 f0Decrease EyestempIncrease 0.702 1.18e+ 0 0.727 0.467
## 4 f0Increase (Intercept) 0.000263 2.76e+ 1 -0.299 0.765
## 5 f0Increase EyestempDecrease 0.5 0 NaN NaN
## 6 f0Increase EyestempIncrease 0.998 2.76e+ 1 0.221 0.825
# emmeans(m, pairwise~Eyes, by="change")
No effect of cheek temperature change on f0 max change
# Stacked bar chart: counts of f0 max change direction per cheek-temperature
# change category.
ggplot(filter(ts, section == "entireExp", feature == "f0max"),
       aes(x = Cheeks, fill = change)) +
  geom_bar() +
  scale_fill_manual(values = c("f0Decrease" = "lightblue", "f0Increase" = "red", "f0NS" = "gray")) +
  ggtitle("Cheek temp. change and f0 max change")
# Multinomial model: does cheek-temperature change predict the direction of
# f0 max change? Coefficients are reported as probabilities via plogis().
m <- nnet::multinom(change ~ Cheeks,
                    data = filter(ts, feature == "f0max", section == "entireExp"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate)) # log odds -> probabilities
## # weights: 12 (6 variable)
## initial value 41.747267
## iter 10 value 27.982898
## iter 20 value 27.946866
## iter 30 value 27.944554
## final value 27.944553
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 f0Decrease (Intercept) 0.357 0.558 -1.05 0.292
## 2 f0Decrease CheekstempDecrease 0.340 0.977 -0.681 0.496
## 3 f0Decrease CheekstempIncrease 0.444 0.820 -0.272 0.786
## 4 f0Increase (Intercept) 0.182 0.782 -1.92 0.0543
## 5 f0Increase CheekstempDecrease 0.0000745 92.9 -0.102 0.919
## 6 f0Increase CheekstempIncrease 0.0000119 205. -0.0553 0.956
emmeans(m, pairwise~Cheeks, by="change")
## $emmeans
## change = f0NS:
## Cheeks prob SE df lower.CL upper.CL
## tempNS 5.63e-01 0.124019 6 0.259045 0.865973
## tempDecrease 7.78e-01 0.138580 6 0.438687 1.116871
## tempIncrease 6.92e-01 0.128008 6 0.379076 1.005527
##
## change = f0Decrease:
## Cheeks prob SE df lower.CL upper.CL
## tempNS 3.13e-01 0.115879 6 0.028962 0.596053
## tempDecrease 2.22e-01 0.138577 6 -0.116877 0.561293
## tempIncrease 3.08e-01 0.128008 6 -0.005528 0.620922
##
## change = f0Increase:
## Cheeks prob SE df lower.CL upper.CL
## tempNS 1.25e-01 0.082675 6 -0.077315 0.327282
## tempDecrease 1.29e-05 0.001196 6 -0.002914 0.002940
## tempIncrease 1.80e-06 0.000375 6 -0.000916 0.000919
##
## Confidence level used: 0.95
##
## $contrasts
## change = f0NS:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease -0.215270 0.18597 6 -1.158 0.5175
## tempNS - tempIncrease -0.129792 0.17823 6 -0.728 0.7568
## tempDecrease - tempIncrease 0.085478 0.18865 6 0.453 0.8949
##
## change = f0Decrease:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 0.090300 0.18064 6 0.500 0.8741
## tempNS - tempIncrease 0.004810 0.17267 6 0.028 0.9996
## tempDecrease - tempIncrease -0.085489 0.18865 6 -0.453 0.8949
##
## change = f0Increase:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 0.124971 0.08268 6 1.511 0.3507
## tempNS - tempIncrease 0.124982 0.08268 6 1.512 0.3506
## tempDecrease - tempIncrease 0.000011 0.00125 6 0.009 1.0000
##
## P value adjustment: tukey method for comparing a family of 3 estimates
No effect of condition
# Stacked bar chart: counts of f0 SD change direction per condition.
ggplot(filter(dat, feature == "f0SD", section == "entireExp"),
       aes(x = condition, fill = change)) +
  geom_bar() +
  scale_fill_manual(values = c("decrease" = "lightblue", "increase" = "red", "ns" = "gray")) +
  ggtitle("f0 SD across entire experiment")
# Multinomial model: does condition predict the direction of f0 SD change?
m <- nnet::multinom(change ~ condition,
                    data = filter(dat, feature == "f0SD", section == "entireExp"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate)) # plogis transforms log odds (the output of the model) into probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## final value 32.831139
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.0667 1.04 -2.55 0.0108
## 2 decrease conditionimpersonal 0.848 1.19 1.44 0.148
## 3 increase (Intercept) 0.263 0.521 -1.98 0.0481
## 4 increase conditionimpersonal 0.528 0.788 0.144 0.886
emmeans(m, pairwise~condition, by="change")
## $emmeans
## change = ns:
## condition prob SE df lower.CL upper.CL
## close 0.700 0.1025 4 0.4155 0.985
## impersonal 0.556 0.1171 4 0.2304 0.881
##
## change = decrease:
## condition prob SE df lower.CL upper.CL
## close 0.050 0.0487 4 -0.0853 0.185
## impersonal 0.222 0.0980 4 -0.0498 0.494
##
## change = increase:
## condition prob SE df lower.CL upper.CL
## close 0.250 0.0968 4 -0.0188 0.519
## impersonal 0.222 0.0980 4 -0.0498 0.494
##
## Confidence level used: 0.95
##
## $contrasts
## change = ns:
## contrast estimate SE df t.ratio p.value
## close - impersonal 0.1444 0.156 4 0.928 0.4058
##
## change = decrease:
## contrast estimate SE df t.ratio p.value
## close - impersonal -0.1722 0.109 4 -1.574 0.1907
##
## change = increase:
## contrast estimate SE df t.ratio p.value
## close - impersonal 0.0278 0.138 4 0.202 0.8500
No effect of perception of partner
# Multinomial model: perceived closeness as a predictor of f0 SD change.
m <- nnet::multinom(change ~ closeness,
                    data = filter(dat, feature == "f0SD", section == "entireExp"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate)) # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## final value 33.927892
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.273 1.05 -0.929 0.353
## 2 decrease closeness 0.458 0.283 -0.602 0.547
## 3 increase (Intercept) 0.267 0.883 -1.14 0.252
## 4 increase closeness 0.502 0.210 0.0382 0.969
# Multinomial model: perceived similarity as a predictor of f0 SD change.
m <- nnet::multinom(change ~ similarity,
                    data = filter(dat, feature == "f0SD", section == "entireExp"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate)) # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 33.073731
## iter 10 value 33.073731
## final value 33.073731
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.194 1.72 -0.825 0.410
## 2 decrease similarity 0.494 0.284 -0.0884 0.930
## 3 increase (Intercept) 0.685 1.28 0.607 0.544
## 4 increase similarity 0.419 0.235 -1.39 0.164
# Multinomial model: partner likeability as a predictor of f0 SD change.
m <- nnet::multinom(change ~ likeability,
                    data = filter(dat, feature == "f0SD", section == "entireExp"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate)) # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 33.604388
## final value 33.604384
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.623 2.30 0.219 0.826
## 2 decrease likeability 0.424 0.344 -0.896 0.370
## 3 increase (Intercept) 0.573 1.90 0.156 0.876
## 4 increase likeability 0.454 0.274 -0.679 0.497
# Multinomial model: desire to become friends as a predictor of f0 SD change.
m <- nnet::multinom(change ~ becomeFriends,
                    data = filter(dat, feature == "f0SD", section == "entireExp"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate)) # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 33.913787
## iter 10 value 33.913787
## final value 33.913787
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.272 1.63 -0.603 0.546
## 2 decrease becomeFriends 0.475 0.275 -0.368 0.713
## 3 increase (Intercept) 0.164 1.50 -1.09 0.277
## 4 increase becomeFriends 0.527 0.235 0.455 0.649
No effect of BFI scores
# Multinomial model: BFI extraversion as a predictor of f0 SD change.
m <- nnet::multinom(change ~ extraversion,
                    data = filter(dat, feature == "f0SD", section == "entireExp"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate)) # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 34.018733
## final value 34.017565
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.186 2.92 -0.506 0.613
## 2 decrease extraversion 0.493 0.828 -0.0319 0.975
## 3 increase (Intercept) 0.526 2.32 0.0449 0.964
## 4 increase extraversion 0.422 0.672 -0.471 0.638
# Multinomial model: BFI openness as a predictor of f0 SD change.
m <- nnet::multinom(change ~ openness,
                    data = filter(dat, feature == "f0SD", section == "entireExp"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate)) # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 33.522097
## final value 33.521832
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.0282 2.42 -1.46 0.143
## 2 decrease openness 0.634 0.638 0.860 0.390
## 3 increase (Intercept) 0.0791 1.84 -1.33 0.183
## 4 increase openness 0.602 0.499 0.832 0.405
# Multinomial model: BFI agreeableness as a predictor of f0 SD change.
m <- nnet::multinom(change ~ agreeableness,
                    data = filter(dat, feature == "f0SD", section == "entireExp"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate)) # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 33.862243
## final value 33.860466
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.185 3.42 -0.434 0.664
## 2 decrease agreeableness 0.494 0.970 -0.0252 0.980
## 3 increase (Intercept) 0.724 2.72 0.355 0.723
## 4 increase agreeableness 0.362 0.794 -0.716 0.474
# Multinomial model: BFI conscientiousness as a predictor of f0 SD change.
m <- nnet::multinom(change ~ conscientiousness,
                    data = filter(dat, feature == "f0SD", section == "entireExp"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate)) # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 33.930485
## final value 33.928000
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.194 3.62 -0.393 0.695
## 2 decrease conscientiousness 0.491 0.887 -0.0405 0.968
## 3 increase (Intercept) 0.672 2.69 0.267 0.789
## 4 increase conscientiousness 0.395 0.671 -0.634 0.526
# Multinomial model: BFI neuroticism as a predictor of f0 SD change.
m <- nnet::multinom(change ~ neuroticism,
                    data = filter(dat, feature == "f0SD", section == "entireExp"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate)) # log odds -> probabilities
## # weights: 9 (4 variable)
## initial value 41.747267
## iter 10 value 34.071871
## final value 34.071086
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.202 2.21 -0.621 0.534
## 2 decrease neuroticism 0.484 0.682 -0.0912 0.927
## 3 increase (Intercept) 0.405 1.73 -0.222 0.825
## 4 increase neuroticism 0.453 0.543 -0.350 0.726
No effect of forehead temperature change on f0 SD change
# Stacked bar chart: counts of f0 SD change direction per forehead-temperature
# change category.
ggplot(filter(ts, section == "entireExp", feature == "f0SD"),
       aes(x = Forehead, fill = change)) +
  geom_bar() +
  scale_fill_manual(values = c("f0Decrease" = "lightblue", "f0Increase" = "red", "f0NS" = "gray")) +
  ggtitle("Forehead temp. change and f0 SD change")
# Multinomial model: does forehead-temperature change predict the direction of
# f0 SD change? Coefficients are reported as probabilities via plogis().
m <- nnet::multinom(change ~ Forehead,
                    data = filter(ts, feature == "f0SD", section == "entireExp"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate)) # log odds -> probabilities
## # weights: 12 (6 variable)
## initial value 41.747267
## iter 10 value 32.559031
## iter 20 value 32.546476
## iter 30 value 32.545534
## final value 32.545532
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 f0Decrease (Intercept) 0.125 1.07 -1.82 0.0687
## 2 f0Decrease ForeheadtempDecrease 0.235 470. -0.00251 0.998
## 3 f0Decrease ForeheadtempIncrease 0.622 1.20 0.414 0.679
## 4 f0Increase (Intercept) 0.222 0.802 -1.56 0.118
## 5 f0Increase ForeheadtempDecrease 1.00 96.5 0.108 0.914
## 6 f0Increase ForeheadtempIncrease 0.553 0.932 0.227 0.821
emmeans(m, pairwise~Forehead, by="change")
## $emmeans
## change = f0NS:
## Forehead prob SE df lower.CL upper.CL
## tempNS 7.00e-01 0.14492 6 0.34539 1.05458
## tempDecrease 1.07e-04 0.01036 6 -0.02524 0.02546
## tempIncrease 6.30e-01 0.09294 6 0.40222 0.85703
##
## change = f0Decrease:
## Forehead prob SE df lower.CL upper.CL
## tempNS 1.00e-01 0.09487 6 -0.13213 0.33213
## tempDecrease 4.70e-06 0.00217 6 -0.00531 0.00532
## tempIncrease 1.48e-01 0.06837 6 -0.01914 0.31544
##
## change = f0Increase:
## Forehead prob SE df lower.CL upper.CL
## tempNS 2.00e-01 0.12649 6 -0.10951 0.50953
## tempDecrease 1.00e+00 0.01059 6 0.97399 1.02579
## tempIncrease 2.22e-01 0.08001 6 0.02645 0.41800
##
## Confidence level used: 0.95
##
## $contrasts
## change = f0NS:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 0.6999 0.1453 6 4.817 0.0070
## tempNS - tempIncrease 0.0704 0.1722 6 0.409 0.9133
## tempDecrease - tempIncrease -0.6295 0.0935 6 -6.732 0.0013
##
## change = f0Decrease:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 0.1000 0.0949 6 1.054 0.5736
## tempNS - tempIncrease -0.0481 0.1169 6 -0.412 0.9121
## tempDecrease - tempIncrease -0.1481 0.0684 6 -2.166 0.1564
##
## change = f0Increase:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease -0.7999 0.1269 6 -6.301 0.0018
## tempNS - tempIncrease -0.0222 0.1497 6 -0.148 0.9880
## tempDecrease - tempIncrease 0.7777 0.0807 6 9.636 0.0002
##
## P value adjustment: tukey method for comparing a family of 3 estimates
No effect of nose temperature change on f0 SD change
# Stacked bar chart: counts of f0 SD change direction per nose-temperature
# change category.
ggplot(filter(ts, section == "entireExp", feature == "f0SD"),
       aes(x = Nose, fill = change)) +
  geom_bar() +
  scale_fill_manual(values = c("f0Decrease" = "lightblue", "f0Increase" = "red", "f0NS" = "gray")) +
  ggtitle("Nose temp. change and f0 SD change")
# Multinomial model: does nose-temperature change predict the direction of
# f0 SD change? Coefficients are reported as probabilities via plogis().
m <- nnet::multinom(change ~ Nose,
                    data = filter(ts, feature == "f0SD", section == "entireExp"))
tidy(m) |>
  as_tibble() |>
  mutate(estimate = plogis(estimate)) # log odds -> probabilities
## # weights: 12 (6 variable)
## initial value 41.747267
## iter 10 value 32.785224
## iter 20 value 32.732617
## iter 30 value 32.729163
## final value 32.729158
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 f0Decrease (Intercept) 0.000144 83.4 -0.106 0.916
## 2 f0Decrease NosetempDecrease 0.00945 435. -0.0107 0.991
## 3 f0Decrease NosetempIncrease 0.999 83.4 0.0901 0.928
## 4 f0Increase (Intercept) 0.500 1.41 0.000155 1.00
## 5 f0Increase NosetempDecrease 0.200 1.80 -0.769 0.442
## 6 f0Increase NosetempIncrease 0.269 1.48 -0.674 0.500
emmeans(m, pairwise~Nose, by="change")
## $emmeans
## change = f0NS:
## Nose prob SE df lower.CL upper.CL
## tempNS 5.00e-01 0.353553 6 -0.36520 1.36502
## tempDecrease 8.00e-01 0.178885 6 0.36229 1.23772
## tempIncrease 6.13e-01 0.087483 6 0.39884 0.82697
##
## change = f0Decrease:
## Nose prob SE df lower.CL upper.CL
## tempNS 7.19e-05 0.005995 6 -0.01460 0.01474
## tempDecrease 1.10e-06 0.000468 6 -0.00115 0.00115
## tempIncrease 1.61e-01 0.066058 6 -0.00035 0.32293
##
## change = f0Increase:
## Nose prob SE df lower.CL upper.CL
## tempNS 5.00e-01 0.353553 6 -0.36510 1.36513
## tempDecrease 2.00e-01 0.178884 6 -0.23772 0.63771
## tempIncrease 2.26e-01 0.075095 6 0.04206 0.40956
##
## Confidence level used: 0.95
##
## $contrasts
## change = f0NS:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease -3.00e-01 0.39623 6 -0.757 0.7406
## tempNS - tempIncrease -1.13e-01 0.36422 6 -0.310 0.9488
## tempDecrease - tempIncrease 1.87e-01 0.19913 6 0.940 0.6375
##
## change = f0Decrease:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 7.08e-05 0.00601 6 0.012 0.9999
## tempNS - tempIncrease -1.61e-01 0.06633 6 -2.431 0.1115
## tempDecrease - tempIncrease -1.61e-01 0.06606 6 -2.442 0.1100
##
## change = f0Increase:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 3.00e-01 0.39623 6 0.757 0.7407
## tempNS - tempIncrease 2.74e-01 0.36144 6 0.759 0.7399
## tempDecrease - tempIncrease -2.58e-02 0.19401 6 -0.133 0.9903
##
## P value adjustment: tukey method for comparing a family of 3 estimates
No effect of eye temperature change on f0 SD change
# Bar chart: counts of f0 SD change categories within each eye-temperature
# change category; rows with missing Eyes values are dropped.
ggplot(
  filter(ts, section == "entireExp", feature == "f0SD", !is.na(Eyes)),
  aes(x = Eyes, fill = change)
) +
  geom_bar() +
  scale_fill_manual(
    values = c("f0Decrease" = "lightblue", "f0Increase" = "red", "f0NS" = "gray")
  ) +
  ggtitle("Eye temp. change and f0 SD change")
# Multinomial model: f0 SD change category as a function of eye temperature
# change; estimates converted to the probability scale via plogis().
tidy(m <- nnet::multinom(change ~ Eyes, data=ts |> filter(feature=="f0SD", section=="entireExp", !is.na(Eyes)))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 12 (6 variable)
## initial value 36.254206
## iter 10 value 26.624975
## iter 20 value 26.610238
## iter 30 value 26.609178
## final value 26.609178
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 f0Decrease (Intercept) 0.00000996 1.42e+2 -0.0813 0.935
## 2 f0Decrease EyestempDecrease 0.5 1.31e-9 0 1
## 3 f0Decrease EyestempIncrease 1.00 1.42e+2 0.0690 0.945
## 4 f0Increase (Intercept) 0.167 1.10e+0 -1.47 0.142
## 5 f0Increase EyestempDecrease 0.5 0 NaN NaN
## 6 f0Increase EyestempIncrease 0.673 1.18e+0 0.610 0.542
# emmeans(m, pairwise~Eyes, by="change") # isn't working (I'm thinking maybe because "eyes" have lots of missing values)
No effect of cheek temperature change on f0 SD change
# Bar chart: counts of f0 SD change categories within each cheek-temperature
# change category (entire experiment).
ggplot(
  filter(ts, section == "entireExp", feature == "f0SD"),
  aes(x = Cheeks, fill = change)
) +
  geom_bar() +
  scale_fill_manual(
    values = c("f0Decrease" = "lightblue", "f0Increase" = "red", "f0NS" = "gray")
  ) +
  ggtitle("Cheek temp. change and f0 SD change")
# Multinomial model: f0 SD change category as a function of cheek temperature
# change; estimates converted to the probability scale via plogis().
tidy(m <- nnet::multinom(change ~ Cheeks, data=ts |> filter(feature=="f0SD", section=="entireExp"))) |>
as_tibble() |>
mutate(estimate = plogis(estimate))
## # weights: 12 (6 variable)
## initial value 41.747267
## iter 10 value 30.881893
## iter 20 value 30.849472
## final value 30.847366
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 f0Decrease (Intercept) 0.0909 1.05 -2.20 0.0281
## 2 f0Decrease CheekstempDecrease 0.000189 86.9 -0.0986 0.921
## 3 f0Decrease CheekstempIncrease 0.851 1.22 1.43 0.154
## 4 f0Increase (Intercept) 0.333 0.548 -1.27 0.206
## 5 f0Increase CheekstempDecrease 0.364 0.971 -0.576 0.564
## 6 f0Increase CheekstempIncrease 0.364 0.971 -0.576 0.564
emmeans(m, pairwise~Cheeks, by="change")
## $emmeans
## change = f0NS:
## Cheeks prob SE df lower.CL upper.CL
## tempNS 6.25e-01 0.12103 6 0.32883 0.92114
## tempDecrease 7.78e-01 0.13859 6 0.43864 1.11686
## tempIncrease 5.38e-01 0.13826 6 0.20014 0.87678
##
## change = f0Decrease:
## Cheeks prob SE df lower.CL upper.CL
## tempNS 6.25e-02 0.06052 6 -0.08558 0.21059
## tempDecrease 1.47e-05 0.00128 6 -0.00311 0.00314
## tempIncrease 3.08e-01 0.12801 6 -0.00552 0.62093
##
## change = f0Increase:
## Cheeks prob SE df lower.CL upper.CL
## tempNS 3.13e-01 0.11588 6 0.02896 0.59606
## tempDecrease 2.22e-01 0.13858 6 -0.11686 0.56134
## tempIncrease 1.54e-01 0.10007 6 -0.09101 0.39869
##
## Confidence level used: 0.95
##
## $contrasts
## change = f0NS:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease -0.1528 0.1840 6 -0.830 0.6996
## tempNS - tempIncrease 0.0865 0.1838 6 0.471 0.8872
## tempDecrease - tempIncrease 0.2393 0.1958 6 1.222 0.4840
##
## change = f0Decrease:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 0.0625 0.0605 6 1.032 0.5854
## tempNS - tempIncrease -0.2452 0.1416 6 -1.732 0.2695
## tempDecrease - tempIncrease -0.3077 0.1280 6 -2.404 0.1154
##
## change = f0Increase:
## contrast estimate SE df t.ratio p.value
## tempNS - tempDecrease 0.0903 0.1806 6 0.500 0.8741
## tempNS - tempIncrease 0.1587 0.1531 6 1.036 0.5832
## tempDecrease - tempIncrease 0.0684 0.1709 6 0.400 0.9167
##
## P value adjustment: tukey method for comparing a family of 3 estimates
# Load the IPU-level speech data; creates `ipus` in the global environment.
load(paste0(here::here(), "/data/speechData-allIPUs.RData"))
# Optional filter (currently disabled): restrict the analysis to non-male speakers.
# ipus <- ipus |>
# filter(gender != "Male")
# Scatter of z-scored f0 median against overall turn index, with a linear
# trend line, faceted by condition (entire experiment).
ggplot(ipus, aes(x = turnOverall, y = f0medz)) +
  geom_point() +
  geom_smooth(method = "lm") +
  facet_wrap(~condition) +
  ggtitle("Entire experiment")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 188 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 188 rows containing missing values (`geom_point()`).
summary(m <- lmer(f0medz ~ turnOverall + (1|speaker), ipus))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0medz ~ turnOverall + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 14062.2
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -5.6272 -0.6559 -0.1142 0.5397 4.9370
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.001996 0.04468
## Residual 0.937662 0.96833
## Number of obs: 5060, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -5.193e-02 2.743e-02 3.421e+02 -1.893 0.059210 .
## turnOverall 1.056e-03 3.091e-04 4.341e+03 3.417 0.000639 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## turnOverall -0.825
summary(m1 <- lmer(f0medz ~ turnOverall : condition + (1|speaker), ipus))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0medz ~ turnOverall:condition + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 14073.6
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -5.6239 -0.6542 -0.1149 0.5345 4.9450
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.002054 0.04532
## Residual 0.937313 0.96815
## Number of obs: 5060, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) -5.606e-02 2.757e-02 3.361e+02 -2.033
## turnOverall:conditionclose 1.431e-03 3.849e-04 7.182e+02 3.717
## turnOverall:conditionimpersonal 8.505e-04 3.342e-04 8.997e+02 2.545
## Pr(>|t|)
## (Intercept) 0.042818 *
## turnOverall:conditionclose 0.000217 ***
## turnOverall:conditionimpersonal 0.011091 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) trnOvrll:cndtnc
## trnOvrll:cndtnc -0.713
## trnOvrll:cndtnm -0.724 0.516
anova(m, m1)
## refitting model(s) with ML (instead of REML)
## Data: ipus
## Models:
## m: f0medz ~ turnOverall + (1 | speaker)
## m1: f0medz ~ turnOverall:condition + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 14049 14076 -7020.7 14041
## m1 5 14049 14081 -7019.4 14039 2.6732 1 0.102
# Residual diagnostics for m: histogram, Q-Q plot, and residuals vs fitted, in a 2x2 grid.
par(mfrow=c(2, 2))
hist(resid(m))
qqnorm(resid(m));qqline(resid(m))
plot(fitted(m), resid(m))
# Scatter of z-scored f0 median against overall turn index for the Lists task,
# with a linear trend line, faceted by condition.
ggplot(filter(ipus, task == "Lists"), aes(x = turnOverall, y = f0medz)) +
  geom_point() +
  geom_smooth(method = "lm") +
  facet_wrap(~condition) +
  ggtitle("Question lists (1st part of experiment)")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 42 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 42 rows containing missing values (`geom_point()`).
summary(m <- lmer(f0medz ~ turnOverall + (1|speaker), ipus |> filter(task=="Lists")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0medz ~ turnOverall + (1 | speaker)
## Data: filter(ipus, task == "Lists")
##
## REML criterion at convergence: 3938.2
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -5.4269 -0.6360 -0.1422 0.5219 4.5342
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.03997 0.1999
## Residual 1.00093 1.0005
## Number of obs: 1371, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -3.710e-02 5.928e-02 1.329e+02 -0.626 0.532
## turnOverall -4.771e-04 2.053e-03 1.193e+03 -0.232 0.816
##
## Correlation of Fixed Effects:
## (Intr)
## turnOverall -0.692
summary(m1 <- lmer(f0medz ~ turnOverall : condition + (1|speaker), ipus |> filter(task=="Lists")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0medz ~ turnOverall:condition + (1 | speaker)
## Data: filter(ipus, task == "Lists")
##
## REML criterion at convergence: 3947.7
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -5.4275 -0.6364 -0.1414 0.5211 4.5292
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.04061 0.2015
## Residual 1.00141 1.0007
## Number of obs: 1371, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) -3.518e-02 6.032e-02 1.330e+02 -0.583
## turnOverall:conditionclose -9.304e-04 3.248e-03 3.872e+02 -0.286
## turnOverall:conditionimpersonal -3.484e-04 2.189e-03 6.449e+02 -0.159
## Pr(>|t|)
## (Intercept) 0.561
## turnOverall:conditionclose 0.775
## turnOverall:conditionimpersonal 0.874
##
## Correlation of Fixed Effects:
## (Intr) trnOvrll:cndtnc
## trnOvrll:cndtnc -0.562
## trnOvrll:cndtnm -0.579 0.325
anova(m, m1)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Lists")
## Models:
## m: f0medz ~ turnOverall + (1 | speaker)
## m1: f0medz ~ turnOverall:condition + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 3931.2 3952.0 -1961.6 3923.2
## m1 5 3933.1 3959.2 -1961.6 3923.1 0.0401 1 0.8412
# Residual diagnostics for the Lists-task model m.
par(mfrow=c(2, 2))
hist(resid(m))
qqnorm(resid(m));qqline(resid(m))
plot(fitted(m), resid(m))
# Scatter of z-scored f0 median against overall turn index for the Diapix task,
# with a linear trend line, faceted by condition.
ggplot(filter(ipus, task == "Diapix"), aes(x = turnOverall, y = f0medz)) +
  geom_point() +
  geom_smooth(method = "lm") +
  facet_wrap(~condition) +
  ggtitle("Diapix (2nd part of experiment)")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 146 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 146 rows containing missing values (`geom_point()`).
summary(m <- lmer(f0medz ~ turnOverall + (1|speaker), ipus |> filter(task=="Diapix")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0medz ~ turnOverall + (1 | speaker)
## Data: filter(ipus, task == "Diapix")
##
## REML criterion at convergence: 10074.7
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.4970 -0.6472 -0.0909 0.5529 4.6796
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.01985 0.1409
## Residual 0.88424 0.9403
## Number of obs: 3689, groups: speaker, 36
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -6.237e-02 5.208e-02 2.932e+02 -1.198 0.232
## turnOverall 1.192e-03 4.741e-04 2.854e+03 2.514 0.012 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## turnOverall -0.841
summary(m1 <- lmer(f0medz ~ turnOverall : condition + (1|speaker), ipus |> filter(task=="Diapix")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0medz ~ turnOverall:condition + (1 | speaker)
## Data: filter(ipus, task == "Diapix")
##
## REML criterion at convergence: 10087
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.4967 -0.6501 -0.0881 0.5534 4.6895
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.01951 0.1397
## Residual 0.88434 0.9404
## Number of obs: 3689, groups: speaker, 36
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) -6.617e-02 5.213e-02 3.119e+02 -1.269
## turnOverall:conditionclose 1.513e-03 5.741e-04 9.141e+02 2.635
## turnOverall:conditionimpersonal 9.963e-04 5.118e-04 5.802e+02 1.947
## Pr(>|t|)
## (Intercept) 0.20533
## turnOverall:conditionclose 0.00855 **
## turnOverall:conditionimpersonal 0.05207 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) trnOvrll:cndtnc
## trnOvrll:cndtnc -0.737
## trnOvrll:cndtnm -0.748 0.552
anova(m, m1)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Diapix")
## Models:
## m: f0medz ~ turnOverall + (1 | speaker)
## m1: f0medz ~ turnOverall:condition + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 10064 10089 -5027.9 10056
## m1 5 10065 10096 -5027.4 10055 1.0392 1 0.308
# Residual diagnostics for the Diapix-task model m.
par(mfrow=c(2, 2))
hist(resid(m))
qqnorm(resid(m));qqline(resid(m))
plot(fitted(m), resid(m))
Checking if f0 median changes across time in interaction with questionnaire scores: Even though many of the models are significant, their AIC is not preferred over the model without the questionnaire scores.
summary(m <- lmer(f0medz ~ turnOverall + (1|speaker), ipus))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0medz ~ turnOverall + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 14062.2
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -5.6272 -0.6559 -0.1142 0.5397 4.9370
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.001996 0.04468
## Residual 0.937662 0.96833
## Number of obs: 5060, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -5.193e-02 2.743e-02 3.421e+02 -1.893 0.059210 .
## turnOverall 1.056e-03 3.091e-04 4.341e+03 3.417 0.000639 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## turnOverall -0.825
summary(m1 <- lmer(f0medz ~ turnOverall : closeness + (1|speaker), ipus))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0medz ~ turnOverall:closeness + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 14073.6
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -5.6631 -0.6588 -0.1074 0.5381 4.9145
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.002762 0.05256
## Residual 0.938625 0.96883
## Number of obs: 5060, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -6.905e-03 2.317e-02 7.737e+01 -0.298 0.7665
## turnOverall:closeness 1.189e-04 6.052e-05 2.565e+02 1.964 0.0506 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:cl -0.715
summary(m2 <- lmer(f0medz ~ turnOverall : similarity + (1|speaker), ipus))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0medz ~ turnOverall:similarity + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 14075.3
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -5.6546 -0.6584 -0.1077 0.5365 4.9077
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.002078 0.04559
## Residual 0.939356 0.96920
## Number of obs: 5060, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -4.552e-03 2.537e-02 1.504e+02 -0.179 0.858
## turnOverall:similarity 7.349e-05 4.906e-05 8.452e+02 1.498 0.135
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:sm -0.789
summary(m3 <- lmer(f0medz ~ turnOverall : likeability + (1|speaker), ipus))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0medz ~ turnOverall:likeability + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 14071.3
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -5.6419 -0.6596 -0.1082 0.5360 4.9234
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.002234 0.04727
## Residual 0.938452 0.96874
## Number of obs: 5060, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -2.936e-02 2.658e-02 2.130e+02 -1.105 0.2705
## turnOverall:likeability 1.109e-04 4.334e-05 1.904e+03 2.559 0.0106 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:lk -0.806
summary(m4 <- lmer(f0medz ~ turnOverall : becomeFriends + (1|speaker), ipus))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0medz ~ turnOverall:becomeFriends + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 14073.6
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -5.6524 -0.6586 -0.1082 0.5371 4.9203
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.002421 0.04921
## Residual 0.938759 0.96890
## Number of obs: 5060, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -1.560e-02 2.552e-02 1.477e+02 -0.612 0.5418
## turnOverall:becomeFriends 9.565e-05 4.640e-05 8.303e+02 2.061 0.0396 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:bF -0.782
anova(m, m1)
## refitting model(s) with ML (instead of REML)
## Data: ipus
## Models:
## m: f0medz ~ turnOverall + (1 | speaker)
## m1: f0medz ~ turnOverall:closeness + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 14049 14076 -7020.7 14041
## m1 4 14058 14084 -7024.8 14050 0 0
anova(m, m2)
## refitting model(s) with ML (instead of REML)
## Data: ipus
## Models:
## m: f0medz ~ turnOverall + (1 | speaker)
## m2: f0medz ~ turnOverall:similarity + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 14049 14076 -7020.7 14041
## m2 4 14059 14085 -7025.4 14051 0 0
anova(m, m3)
## refitting model(s) with ML (instead of REML)
## Data: ipus
## Models:
## m: f0medz ~ turnOverall + (1 | speaker)
## m3: f0medz ~ turnOverall:likeability + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 14049 14076 -7020.7 14041
## m3 4 14055 14081 -7023.3 14047 0 0
anova(m, m4)
## refitting model(s) with ML (instead of REML)
## Data: ipus
## Models:
## m: f0medz ~ turnOverall + (1 | speaker)
## m4: f0medz ~ turnOverall:becomeFriends + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 14049 14076 -7020.7 14041
## m4 4 14057 14083 -7024.5 14049 0 0
condition was 1.7 smaller than without); there, f0 max went down in both
conditions.
load(paste0(here::here(), "/data/speechData-allIPUs.RData"))
# ipus <- ipus |>
# filter(gender != "Male")
# Scatter of z-scored f0 max against overall turn index, with a linear trend
# line, faceted by condition (entire experiment).
ggplot(ipus, aes(x = turnOverall, y = f0maxz)) +
  geom_point() +
  geom_smooth(method = "lm") +
  facet_wrap(~condition) +
  ggtitle("Entire experiment")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 111 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 111 rows containing missing values (`geom_point()`).
summary(m <- lm(f0maxz ~ turnOverall, ipus))
##
## Call:
## lm(formula = f0maxz ~ turnOverall, data = ipus)
##
## Residuals:
## Min 1Q Median 3Q Max
## -3.6147 -0.6920 -0.1241 0.5394 4.7085
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 0.134455 0.026883 5.001 5.88e-07 ***
## turnOverall -0.001669 0.000313 -5.331 1.02e-07 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.9929 on 5135 degrees of freedom
## (111 observations deleted due to missingness)
## Multiple R-squared: 0.005504, Adjusted R-squared: 0.00531
## F-statistic: 28.42 on 1 and 5135 DF, p-value: 1.019e-07
summary(m1 <- lm(f0maxz ~ turnOverall : condition, ipus))
##
## Call:
## lm(formula = f0maxz ~ turnOverall:condition, data = ipus)
##
## Residuals:
## Min 1Q Median 3Q Max
## -3.6120 -0.6928 -0.1237 0.5413 4.7147
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 0.1350865 0.0270132 5.001 5.90e-07 ***
## turnOverall:conditionclose -0.0017212 0.0003811 -4.516 6.44e-06 ***
## turnOverall:conditionimpersonal -0.0016420 0.0003323 -4.942 7.98e-07 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.993 on 5134 degrees of freedom
## (111 observations deleted due to missingness)
## Multiple R-squared: 0.005515, Adjusted R-squared: 0.005128
## F-statistic: 14.24 on 2 and 5134 DF, p-value: 6.832e-07
anova(m, m1)
## Analysis of Variance Table
##
## Model 1: f0maxz ~ turnOverall
## Model 2: f0maxz ~ turnOverall:condition
## Res.Df RSS Df Sum of Sq F Pr(>F)
## 1 5135 5062.8
## 2 5134 5062.8 1 0.057298 0.0581 0.8095
# Residual diagnostics for the f0 max model m.
par(mfrow=c(2, 2))
hist(resid(m))
qqnorm(resid(m));qqline(resid(m))
plot(fitted(m), resid(m))
# Scatter of z-scored f0 max against overall turn index for the Lists task,
# with a linear trend line, faceted by condition.
ggplot(filter(ipus, task == "Lists"), aes(x = turnOverall, y = f0maxz)) +
  geom_point() +
  geom_smooth(method = "lm") +
  facet_wrap(~condition) +
  ggtitle("Question lists (1st part of experiment)")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 23 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 23 rows containing missing values (`geom_point()`).
summary(m <- lmer(f0maxz ~ turnOverall + (1|speaker), ipus |> filter(task=="Lists")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0maxz ~ turnOverall + (1 | speaker)
## Data: filter(ipus, task == "Lists")
##
## REML criterion at convergence: 4166.3
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.4119 -0.6898 -0.1841 0.5046 4.1633
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.03965 0.1991
## Residual 1.13682 1.0662
## Number of obs: 1390, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 3.425e-01 6.168e-02 1.251e+02 5.554 1.60e-07 ***
## turnOverall -9.977e-03 2.167e-03 1.150e+03 -4.603 4.62e-06 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## turnOverall -0.704
summary(m1 <- lmer(f0maxz ~ turnOverall : condition + (1|speaker), ipus |> filter(task=="Lists")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0maxz ~ turnOverall:condition + (1 | speaker)
## Data: filter(ipus, task == "Lists")
##
## REML criterion at convergence: 4171.8
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.3679 -0.6888 -0.1690 0.5041 4.1652
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.05317 0.2306
## Residual 1.12918 1.0626
## Number of obs: 1390, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) 0.321492 0.065312 97.257020 4.922
## turnOverall:conditionclose -0.004438 0.003454 338.987109 -1.285
## turnOverall:conditionimpersonal -0.011863 0.002337 591.864519 -5.075
## Pr(>|t|)
## (Intercept) 3.49e-06 ***
## turnOverall:conditionclose 0.2
## turnOverall:conditionimpersonal 5.19e-07 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) trnOvrll:cndtnc
## trnOvrll:cndtnc -0.546
## trnOvrll:cndtnm -0.558 0.304
anova(m, m1)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Lists")
## Models:
## m: f0maxz ~ turnOverall + (1 | speaker)
## m1: f0maxz ~ turnOverall:condition + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 4159.4 4180.4 -2075.7 4151.4
## m1 5 4157.7 4183.9 -2073.8 4147.7 3.7813 1 0.05183 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Residual diagnostics for the Lists-task f0 max model m.
par(mfrow=c(2, 2))
hist(resid(m))
qqnorm(resid(m));qqline(resid(m))
plot(fitted(m), resid(m))
# Scatter of z-scored f0 max against overall turn index for the Diapix task,
# with a linear trend line, faceted by condition.
ggplot(filter(ipus, task == "Diapix"), aes(x = turnOverall, y = f0maxz)) +
  geom_point() +
  geom_smooth(method = "lm") +
  facet_wrap(~condition) +
  ggtitle("Diapix (2nd part of experiment)")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 88 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 88 rows containing missing values (`geom_point()`).
summary(m <- lmer(f0maxz ~ turnOverall + (1|speaker), ipus |> filter(task=="Diapix")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0maxz ~ turnOverall + (1 | speaker)
## Data: filter(ipus, task == "Diapix")
##
## REML criterion at convergence: 10288.2
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.8069 -0.6934 -0.1074 0.5672 4.9201
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.009223 0.09604
## Residual 0.901496 0.94947
## Number of obs: 3747, groups: speaker, 36
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -4.206e-03 4.867e-02 4.109e+02 -0.086 0.931
## turnOverall -3.555e-04 4.674e-04 2.054e+03 -0.760 0.447
##
## Correlation of Fixed Effects:
## (Intr)
## turnOverall -0.888
summary(m1 <- lmer(f0maxz ~ turnOverall : condition + (1|speaker), ipus |> filter(task=="Diapix")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0maxz ~ turnOverall:condition + (1 | speaker)
## Data: filter(ipus, task == "Diapix")
##
## REML criterion at convergence: 10301.8
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.8077 -0.6947 -0.1077 0.5669 4.9243
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.009725 0.09862
## Residual 0.901501 0.94947
## Number of obs: 3747, groups: speaker, 36
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) -3.374e-03 4.917e-02 4.492e+02 -0.069
## turnOverall:conditionclose -4.092e-04 5.548e-04 8.961e+02 -0.737
## turnOverall:conditionimpersonal -3.279e-04 4.894e-04 5.851e+02 -0.670
## Pr(>|t|)
## (Intercept) 0.945
## turnOverall:conditionclose 0.461
## turnOverall:conditionimpersonal 0.503
##
## Correlation of Fixed Effects:
## (Intr) trnOvrll:cndtnc
## trnOvrll:cndtnc -0.803
## trnOvrll:cndtnm -0.810 0.650
anova(m, m1)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Diapix")
## Models:
## m: f0maxz ~ turnOverall + (1 | speaker)
## m1: f0maxz ~ turnOverall:condition + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 10277 10302 -5134.5 10269
## m1 5 10279 10310 -5134.5 10269 0.027 1 0.8694
# Residual diagnostics for the Diapix-task f0 max model m.
par(mfrow=c(2, 2))
hist(resid(m))
qqnorm(resid(m));qqline(resid(m))
plot(fitted(m), resid(m))
Checking if f0 max changes across time in interaction with questionnaire scores: Even though many of the models are significant, their AIC is not preferred over the model without the questionnaire scores.
summary(m <- lmer(f0maxz ~ turnOverall + (1|speaker), ipus))
## boundary (singular) fit: see help('isSingular')
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0maxz ~ turnOverall + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 14524.5
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.6404 -0.6969 -0.1250 0.5432 4.7420
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.0000 0.0000
## Residual 0.9859 0.9929
## Number of obs: 5137, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 1.345e-01 2.688e-02 5.135e+03 5.001 5.88e-07 ***
## turnOverall -1.669e-03 3.130e-04 5.135e+03 -5.331 1.02e-07 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## turnOverall -0.857
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
summary(m1 <- lmer(f0maxz ~ turnOverall : closeness + (1|speaker), ipus))
## boundary (singular) fit: see help('isSingular')
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0maxz ~ turnOverall:closeness + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 14539.3
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.6395 -0.6847 -0.1290 0.5350 4.7623
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.0000 0.0000
## Residual 0.9881 0.9941
## Number of obs: 5137, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 7.729e-02 2.114e-02 5.135e+03 3.656 0.000259 ***
## turnOverall:closeness -2.371e-04 5.764e-05 5.135e+03 -4.115 3.94e-05 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:cl -0.755
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
summary(m2 <- lmer(f0maxz ~ turnOverall : similarity + (1|speaker), ipus))
## boundary (singular) fit: see help('isSingular')
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0maxz ~ turnOverall:similarity + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 14531.3
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.6757 -0.6894 -0.1292 0.5387 4.8003
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.0000 0.0000
## Residual 0.9865 0.9932
## Number of obs: 5137, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 1.116e-01 2.422e-02 5.135e+03 4.607 4.19e-06 ***
## turnOverall:similarity -2.440e-04 4.850e-05 5.135e+03 -5.031 5.05e-07 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:sm -0.820
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
summary(m3 <- lmer(f0maxz ~ turnOverall : likeability + (1|speaker), ipus))
## boundary (singular) fit: see help('isSingular')
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0maxz ~ turnOverall:likeability + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 14530.2
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.6772 -0.6917 -0.1282 0.5436 4.7688
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.0000 0.0000
## Residual 0.9863 0.9931
## Number of obs: 5137, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 1.229e-01 2.560e-02 5.135e+03 4.800 1.63e-06 ***
## turnOverall:likeability -2.239e-04 4.333e-05 5.135e+03 -5.167 2.46e-07 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:lk -0.841
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
summary(m4 <- lmer(f0maxz ~ turnOverall : becomeFriends + (1|speaker), ipus))
## boundary (singular) fit: see help('isSingular')
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0maxz ~ turnOverall:becomeFriends + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 14533.5
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.6748 -0.6896 -0.1252 0.5400 4.7221
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.0000 0.0000
## Residual 0.9869 0.9935
## Number of obs: 5137, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 1.070e-01 2.417e-02 5.135e+03 4.428 9.72e-06 ***
## turnOverall:becomeFriends -2.202e-04 4.572e-05 5.135e+03 -4.817 1.50e-06 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:bF -0.819
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
anova(m, m1)
## refitting model(s) with ML (instead of REML)
## Warning in optwrap(optimizer, devfun, x@theta, lower = x@lower, calc.derivs =
## TRUE, : convergence code 3 from bobyqa: bobyqa -- a trust region step failed to
## reduce q
## Data: ipus
## Models:
## m: f0maxz ~ turnOverall + (1 | speaker)
## m1: f0maxz ~ turnOverall:closeness + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 14512 14538 -7251.7 14504
## m1 4 14523 14549 -7257.5 14515 0 0
# Baseline vs. similarity-interaction model (Df = 0; compare by AIC/BIC).
anova(m, m2)
## refitting model(s) with ML (instead of REML)
## Warning in optwrap(optimizer, devfun, x@theta, lower = x@lower, calc.derivs =
## TRUE, : convergence code 3 from bobyqa: bobyqa -- a trust region step failed to
## reduce q
## Data: ipus
## Models:
## m: f0maxz ~ turnOverall + (1 | speaker)
## m2: f0maxz ~ turnOverall:similarity + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 14512 14538 -7251.7 14504
## m2 4 14515 14541 -7253.3 14507 0 0
# Baseline vs. likeability-interaction model (Df = 0; compare by AIC/BIC).
anova(m, m3)
## refitting model(s) with ML (instead of REML)
## Warning in optwrap(optimizer, devfun, x@theta, lower = x@lower, calc.derivs =
## TRUE, : convergence code 3 from bobyqa: bobyqa -- a trust region step failed to
## reduce q
## Data: ipus
## Models:
## m: f0maxz ~ turnOverall + (1 | speaker)
## m3: f0maxz ~ turnOverall:likeability + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 14512 14538 -7251.7 14504
## m3 4 14513 14539 -7252.6 14505 0 0
# Baseline vs. become-friends-interaction model (Df = 0; compare by AIC/BIC).
anova(m, m4)
## refitting model(s) with ML (instead of REML)
## Warning in optwrap(optimizer, devfun, x@theta, lower = x@lower, calc.derivs =
## TRUE, : convergence code 3 from bobyqa: bobyqa -- a trust region step failed to
## reduce q
## Data: ipus
## Models:
## m: f0maxz ~ turnOverall + (1 | speaker)
## m4: f0maxz ~ turnOverall:becomeFriends + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 14512 14538 -7251.7 14504
## m4 4 14517 14543 -7254.3 14509 0 0
# Reload the full IPU dataset (all speakers; the gender filter below is
# deliberately disabled).
load(paste0(here::here(), "/data/speechData-allIPUs.RData"))
# ipus <- ipus |>
# filter(gender != "Male")
# f0 SD (z-scored) across turns over the entire experiment, faceted by condition.
ggplot(ipus, aes(turnOverall, f0sdz))+
geom_point()+
geom_smooth(method="lm")+
facet_wrap(~condition)+
ggtitle("Entire experiment")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 268 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 268 rows containing missing values (`geom_point()`).
# Baseline: f0 SD change across turns, entire experiment (random speaker
# intercept; fit is singular — speaker variance estimated at 0).
summary(m <- lmer(f0sdz ~ turnOverall + (1|speaker), ipus))
## boundary (singular) fit: see help('isSingular')
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0sdz ~ turnOverall + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 14251.6
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.2064 -0.7197 -0.0992 0.6332 5.2302
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.00 0.00
## Residual 1.02 1.01
## Number of obs: 4980, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 8.632e-03 2.770e-02 4.978e+03 0.312 0.755
## turnOverall 4.886e-04 3.222e-04 4.978e+03 1.516 0.130
##
## Correlation of Fixed Effects:
## (Intr)
## turnOverall -0.856
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# Per-condition slope: f0 SD over turns interacted with condition.
summary(m1 <- lmer(f0sdz ~ turnOverall : condition + (1|speaker), ipus))
## boundary (singular) fit: see help('isSingular')
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0sdz ~ turnOverall:condition + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 14264.3
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.1840 -0.7153 -0.0976 0.6378 5.2186
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.00 0.00
## Residual 1.02 1.01
## Number of obs: 4980, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 5.495e-03 2.783e-02 4.977e+03 0.197 0.8435
## turnOverall:conditionclose 7.540e-04 3.930e-04 4.977e+03 1.919 0.0551
## turnOverall:conditionimpersonal 3.536e-04 3.419e-04 4.977e+03 1.034 0.3011
##
## (Intercept)
## turnOverall:conditionclose .
## turnOverall:conditionimpersonal
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) trnOvrll:cndtnc
## trnOvrll:cndtnc -0.753
## trnOvrll:cndtnm -0.771 0.581
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# Likelihood-ratio test: does splitting the slope by condition improve fit?
anova(m, m1)
## refitting model(s) with ML (instead of REML)
## Data: ipus
## Models:
## m: f0sdz ~ turnOverall + (1 | speaker)
## m1: f0sdz ~ turnOverall:condition + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 14239 14265 -7115.3 14231
## m1 5 14239 14272 -7114.7 14229 1.3923 1 0.238
# Residual diagnostics for the f0 SD baseline model, drawn as a 2x2 panel:
# histogram, normal Q-Q plot, and residuals vs. fitted values.
par(mfrow = c(2, 2))
hist(resid(m))
qqnorm(resid(m))
qqline(resid(m))
plot(fitted(m), resid(m))
# f0 SD across turns in the question-lists task only, by condition.
ggplot(ipus |> filter(task=="Lists"), aes(turnOverall, f0sdz))+
geom_point()+
geom_smooth(method="lm")+
facet_wrap(~condition)+
ggtitle("Question lists (1st part of experiment)")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 56 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 56 rows containing missing values (`geom_point()`).
# Baseline f0 SD model restricted to the Lists task.
summary(m <- lmer(f0sdz ~ turnOverall + (1|speaker), ipus |> filter(task=="Lists")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0sdz ~ turnOverall + (1 | speaker)
## Data: filter(ipus, task == "Lists")
##
## REML criterion at convergence: 3805.3
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.4356 -0.6830 -0.1016 0.6261 4.2131
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.08148 0.2854
## Residual 0.92134 0.9599
## Number of obs: 1357, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 1.324e-01 6.675e-02 8.774e+01 1.984 0.0504 .
## turnOverall -8.838e-03 2.020e-03 1.314e+03 -4.376 1.31e-05 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## turnOverall -0.593
# Condition-interaction f0 SD model, Lists task only.
summary(m1 <- lmer(f0sdz ~ turnOverall : condition + (1|speaker), ipus |> filter(task=="Lists")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0sdz ~ turnOverall:condition + (1 | speaker)
## Data: filter(ipus, task == "Lists")
##
## REML criterion at convergence: 3813.6
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.4423 -0.6955 -0.1152 0.6227 4.1930
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.07613 0.2759
## Residual 0.92253 0.9605
## Number of obs: 1357, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) 0.143081 0.066480 89.423394 2.152
## turnOverall:conditionclose -0.011669 0.003309 521.764928 -3.526
## turnOverall:conditionimpersonal -0.007833 0.002211 784.553139 -3.543
## Pr(>|t|)
## (Intercept) 0.034073 *
## turnOverall:conditionclose 0.000459 ***
## turnOverall:conditionimpersonal 0.000419 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) trnOvrll:cndtnc
## trnOvrll:cndtnc -0.485
## trnOvrll:cndtnm -0.481 0.233
# Model comparison for the Lists task: baseline vs. condition interaction.
anova(m, m1)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Lists")
## Models:
## m: f0sdz ~ turnOverall + (1 | speaker)
## m1: f0sdz ~ turnOverall:condition + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 3798.7 3819.5 -1895.3 3790.7
## m1 5 3799.5 3825.6 -1894.7 3789.5 1.2041 1 0.2725
# Residual diagnostics (2x2 panel) for the Lists-task f0 SD baseline model:
# histogram, normal Q-Q plot, residuals vs. fitted.
par(mfrow = c(2, 2))
hist(resid(m))
qqnorm(resid(m))
qqline(resid(m))
plot(fitted(m), resid(m))
# f0 SD across turns in the Diapix task only, by condition.
ggplot(ipus |> filter(task=="Diapix"), aes(turnOverall, f0sdz))+
geom_point()+
geom_smooth(method="lm")+
facet_wrap(~condition)+
ggtitle("Diapix (2nd part of experiment)")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 212 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 212 rows containing missing values (`geom_point()`).
# Baseline f0 SD model restricted to the Diapix task.
summary(m <- lmer(f0sdz ~ turnOverall + (1|speaker), ipus |> filter(task=="Diapix")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0sdz ~ turnOverall + (1 | speaker)
## Data: filter(ipus, task == "Diapix")
##
## REML criterion at convergence: 10354.2
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.2442 -0.7081 -0.0770 0.6328 5.2415
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.01369 0.117
## Residual 1.00686 1.003
## Number of obs: 3623, groups: speaker, 36
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 1.302e-01 5.327e-02 3.858e+02 2.444 0.015 *
## turnOverall -6.091e-04 5.039e-04 2.288e+03 -1.209 0.227
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## turnOverall -0.876
# Condition-interaction f0 SD model, Diapix task only.
summary(m1 <- lmer(f0sdz ~ turnOverall : condition + (1|speaker), ipus |> filter(task=="Diapix")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0sdz ~ turnOverall:condition + (1 | speaker)
## Data: filter(ipus, task == "Diapix")
##
## REML criterion at convergence: 10367.4
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.2384 -0.7064 -0.0772 0.6340 5.2410
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.01384 0.1176
## Residual 1.00704 1.0035
## Number of obs: 3623, groups: speaker, 36
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) 1.283e-01 5.360e-02 4.113e+02 2.394
## turnOverall:conditionclose -4.946e-04 6.009e-04 8.911e+02 -0.823
## turnOverall:conditionimpersonal -6.688e-04 5.314e-04 5.851e+02 -1.259
## Pr(>|t|)
## (Intercept) 0.0171 *
## turnOverall:conditionclose 0.4107
## turnOverall:conditionimpersonal 0.2087
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) trnOvrll:cndtnc
## trnOvrll:cndtnc -0.785
## trnOvrll:cndtnm -0.794 0.624
# Model comparison for the Diapix task: baseline vs. condition interaction.
anova(m, m1)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Diapix")
## Models:
## m: f0sdz ~ turnOverall + (1 | speaker)
## m1: f0sdz ~ turnOverall:condition + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 10343 10368 -5167.7 10335
## m1 5 10345 10376 -5167.6 10335 0.1511 1 0.6975
# Residual diagnostics (2x2 panel) for the Diapix-task f0 SD baseline model:
# histogram, normal Q-Q plot, residuals vs. fitted.
par(mfrow = c(2, 2))
hist(resid(m))
qqnorm(resid(m))
qqline(resid(m))
plot(fitted(m), resid(m))
Checking if f0 SD changes across time in interaction with questionnaire scores: none of the questionnaire-score interactions reaches significance here, and the AIC of these models is not preferred over the model without the questionnaire scores.
# Baseline f0 SD model, refit for comparison against the questionnaire-score
# interaction models below (m1-m4).
summary(m <- lmer(f0sdz ~ turnOverall + (1|speaker), ipus))
## boundary (singular) fit: see help('isSingular')
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0sdz ~ turnOverall + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 14251.6
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.2064 -0.7197 -0.0992 0.6332 5.2302
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.00 0.00
## Residual 1.02 1.01
## Number of obs: 4980, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 8.632e-03 2.770e-02 4.978e+03 0.312 0.755
## turnOverall 4.886e-04 3.222e-04 4.978e+03 1.516 0.130
##
## Correlation of Fixed Effects:
## (Intr)
## turnOverall -0.856
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# Model 1: f0 SD slope over turns scaled by closeness score.
summary(m1 <- lmer(f0sdz ~ turnOverall : closeness + (1|speaker), ipus))
## boundary (singular) fit: see help('isSingular')
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0sdz ~ turnOverall:closeness + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 14256.2
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.1720 -0.7223 -0.0976 0.6327 5.2175
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.000 0.00
## Residual 1.021 1.01
## Number of obs: 4980, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 2.717e-02 2.178e-02 4.978e+03 1.247 0.212
## turnOverall:closeness 6.289e-05 5.926e-05 4.978e+03 1.061 0.289
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:cl -0.754
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# Model 2: f0 SD slope over turns scaled by similarity score.
summary(m2 <- lmer(f0sdz ~ turnOverall : similarity + (1|speaker), ipus))
## boundary (singular) fit: see help('isSingular')
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0sdz ~ turnOverall:similarity + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 14257.1
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.1771 -0.7207 -0.0971 0.6346 5.2196
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.000 0.00
## Residual 1.021 1.01
## Number of obs: 4980, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 2.982e-02 2.498e-02 4.978e+03 1.194 0.233
## turnOverall:similarity 3.607e-05 4.995e-05 4.978e+03 0.722 0.470
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:sm -0.819
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# Model 3: f0 SD slope over turns scaled by likeability score.
summary(m3 <- lmer(f0sdz ~ turnOverall : likeability + (1|speaker), ipus))
## boundary (singular) fit: see help('isSingular')
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0sdz ~ turnOverall:likeability + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 14256.4
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.1837 -0.7210 -0.0967 0.6353 5.2240
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.00 0.00
## Residual 1.02 1.01
## Number of obs: 4980, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 1.767e-02 2.638e-02 4.978e+03 0.670 0.503
## turnOverall:likeability 5.421e-05 4.462e-05 4.978e+03 1.215 0.224
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:lk -0.840
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# Model 4: f0 SD slope over turns scaled by "become friends" score.
summary(m4 <- lmer(f0sdz ~ turnOverall : becomeFriends + (1|speaker), ipus))
## boundary (singular) fit: see help('isSingular')
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0sdz ~ turnOverall:becomeFriends + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 14255.5
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.1919 -0.7221 -0.0989 0.6328 5.2284
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.00 0.00
## Residual 1.02 1.01
## Number of obs: 4980, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 1.437e-02 2.491e-02 4.978e+03 0.577 0.564
## turnOverall:becomeFriends 6.978e-05 4.707e-05 4.978e+03 1.482 0.138
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:bF -0.818
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# Baseline vs. closeness-interaction model.
# NOTE(review): equal npar (4 vs 4) gives Df = 0; compare by AIC/BIC only.
anova(m, m1)
## refitting model(s) with ML (instead of REML)
## Data: ipus
## Models:
## m: f0sdz ~ turnOverall + (1 | speaker)
## m1: f0sdz ~ turnOverall:closeness + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 14239 14265 -7115.3 14231
## m1 4 14240 14266 -7115.9 14232 0 0
# Baseline vs. similarity-interaction model (Df = 0; compare by AIC/BIC).
anova(m, m2)
## refitting model(s) with ML (instead of REML)
## Data: ipus
## Models:
## m: f0sdz ~ turnOverall + (1 | speaker)
## m2: f0sdz ~ turnOverall:similarity + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 14239 14265 -7115.3 14231
## m2 4 14240 14266 -7116.2 14232 0 0
# Baseline vs. likeability-interaction model (Df = 0; compare by AIC/BIC).
anova(m, m3)
## refitting model(s) with ML (instead of REML)
## Data: ipus
## Models:
## m: f0sdz ~ turnOverall + (1 | speaker)
## m3: f0sdz ~ turnOverall:likeability + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 14239 14265 -7115.3 14231
## m3 4 14240 14266 -7115.8 14232 0 0
# Baseline vs. become-friends-interaction model (Df = 0; compare by AIC/BIC).
anova(m, m4)
## refitting model(s) with ML (instead of REML)
## Data: ipus
## Models:
## m: f0sdz ~ turnOverall + (1 | speaker)
## m4: f0sdz ~ turnOverall:becomeFriends + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 14239 14265 -7115.3 14231
## m4 4 14239 14265 -7115.4 14231 0 0
(These analyses are using the non-z-scored values.)
f0 median: the regression suggests a negative relationship between f0 median and IPU duration, but the residuals are very heteroskedastic and a look at the raw data shows it’s more complex than that.
# Raw (non-z-scored) f0 median against IPU duration, with a GAM smoother.
# NOTE(review): the plot maps f0med to x and ipuDur to y, while the model
# below regresses f0med on ipuDur — axes are swapped relative to the model;
# confirm this orientation is intended.
ggplot(ipus, aes(f0med, ipuDur))+
geom_point()+
geom_smooth()
## `geom_smooth()` using method = 'gam' and formula = 'y ~ s(x, bs = "cs")'
## Warning: Removed 188 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 188 rows containing missing values (`geom_point()`).
# Does raw f0 median vary with IPU duration? (random speaker intercept)
summary(m <- lmer(f0med ~ ipuDur + (1|speaker), ipus))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0med ~ ipuDur + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 43151.1
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -5.3777 -0.5956 -0.0971 0.4834 6.1788
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 531.5 23.05
## Residual 284.3 16.86
## Number of obs: 5060, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 188.8551 3.7565 37.3863 50.274 <2e-16 ***
## ipuDur -0.2791 0.1587 5022.9261 -1.759 0.0787 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## ipuDur -0.060
# Residual diagnostics (2x2 panel) for the f0 median ~ IPU duration model:
# histogram, normal Q-Q plot, residuals vs. fitted.
par(mfrow = c(2, 2))
hist(resid(m))
qqnorm(resid(m))
qqline(resid(m))
plot(fitted(m), resid(m))
f0 max: it’s a more robust finding that longer IPUs have higher f0 max values.
# Raw f0 max against IPU duration, with a GAM smoother (same swapped-axes
# orientation as the f0 median plot above relative to the model).
ggplot(ipus, aes(f0max, ipuDur))+
geom_point()+
geom_smooth()
## `geom_smooth()` using method = 'gam' and formula = 'y ~ s(x, bs = "cs")'
## Warning: Removed 111 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 111 rows containing missing values (`geom_point()`).
# Does raw f0 max vary with IPU duration? (random speaker intercept)
summary(m <- lmer(f0max ~ ipuDur + (1|speaker), ipus))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0max ~ ipuDur + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 53030.9
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.5298 -0.6293 -0.1303 0.4745 4.7527
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 1163 34.10
## Residual 1727 41.55
## Number of obs: 5137, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 234.2913 5.5984 38.0074 41.85 <2e-16 ***
## ipuDur 8.8831 0.3894 5103.0170 22.81 <2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## ipuDur -0.098
# Residual diagnostics (2x2 panel) for the f0 max ~ IPU duration model:
# histogram, normal Q-Q plot, residuals vs. fitted.
par(mfrow = c(2, 2))
hist(resid(m))
qqnorm(resid(m))
qqline(resid(m))
plot(fitted(m), resid(m))
f0 SD: again, a positive relation is suggested between f0 SD and IPU duration, but the plot shows it isn’t linear, and the residuals are again very heteroskedastic.
# Raw f0 SD against IPU duration, with a GAM smoother.
ggplot(ipus, aes(f0sd, ipuDur))+
geom_point()+
geom_smooth()
## `geom_smooth()` using method = 'gam' and formula = 'y ~ s(x, bs = "cs")'
## Warning: Removed 268 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 268 rows containing missing values (`geom_point()`).
# Does raw f0 SD vary with IPU duration?
# NOTE(review): unlike the f0 median/max analyses above, this uses plain lm()
# with no speaker random intercept — confirm the omission is intentional.
summary(m <- lm(f0sd ~ ipuDur, ipus))
##
## Call:
## lm(formula = f0sd ~ ipuDur, data = ipus)
##
## Residuals:
## Min 1Q Median 3Q Max
## -20.127 -7.932 -1.829 5.840 48.283
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 19.22007 0.20696 92.87 <2e-16 ***
## ipuDur 1.11075 0.09988 11.12 <2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 10.79 on 4978 degrees of freedom
## (268 observations deleted due to missingness)
## Multiple R-squared: 0.02424, Adjusted R-squared: 0.02404
## F-statistic: 123.7 on 1 and 4978 DF, p-value: < 2.2e-16
# Residual diagnostics (2x2 panel) for the f0 SD ~ IPU duration regression:
# histogram, normal Q-Q plot, residuals vs. fitted.
par(mfrow = c(2, 2))
hist(resid(m))
qqnorm(resid(m))
qqline(resid(m))
plot(fitted(m), resid(m))
# f0 median (z-scored) across turns over the entire experiment, by condition.
ggplot(ipus, aes(turnOverall, f0medz))+
geom_point()+
geom_smooth(method="lm")+
facet_wrap(~condition)+
ggtitle("Entire experiment")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 704 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 704 rows containing missing values (`geom_point()`).
# Baseline: f0 median change across turns, entire experiment.
summary(m <- lmer(f0medz ~ turnOverall + (1|speaker), ipus)) # the model is singular, so adding or removing random effects isn't making any difference
## boundary (singular) fit: see help('isSingular')
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0medz ~ turnOverall + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 52933
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.3004 -0.6565 -0.1051 0.5512 4.5987
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 3.752e-31 6.125e-16
## Residual 9.904e-01 9.952e-01
## Number of obs: 18708, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -6.820e-02 1.461e-02 1.871e+04 -4.667 3.08e-06 ***
## turnOverall 8.849e-04 1.644e-04 1.871e+04 5.382 7.47e-08 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## turnOverall -0.867
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# Per-condition slope: f0 median over turns interacted with condition.
summary(m1 <- lmer(f0medz ~ turnOverall : condition + (1|speaker), ipus))
## boundary (singular) fit: see help('isSingular')
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0medz ~ turnOverall:condition + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 52936.7
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.3201 -0.6591 -0.1020 0.5531 4.6134
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 3.750e-31 6.124e-16
## Residual 9.899e-01 9.949e-01
## Number of obs: 18708, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) -7.521e-02 1.475e-02 1.870e+04 -5.099
## turnOverall:conditionclose 1.287e-03 2.015e-04 1.870e+04 6.386
## turnOverall:conditionimpersonal 7.099e-04 1.720e-04 1.870e+04 4.127
## Pr(>|t|)
## (Intercept) 3.45e-07 ***
## turnOverall:conditionclose 1.74e-10 ***
## turnOverall:conditionimpersonal 3.70e-05 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) trnOvrll:cndtnc
## trnOvrll:cndtnc -0.780
## trnOvrll:cndtnm -0.780 0.609
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# Likelihood-ratio test: the condition-interaction model is preferred here.
anova(m, m1)
## refitting model(s) with ML (instead of REML)
## Data: ipus
## Models:
## m: f0medz ~ turnOverall + (1 | speaker)
## m1: f0medz ~ turnOverall:condition + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 52917 52949 -26455 52909
## m1 5 52908 52947 -26449 52898 11.893 1 0.0005636 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Residual diagnostics (2x2 panel) for the f0 median baseline model:
# histogram, normal Q-Q plot, residuals vs. fitted.
par(mfrow = c(2, 2))
hist(resid(m))
qqnorm(resid(m))
qqline(resid(m))
plot(fitted(m), resid(m))
# f0 median across turns in the question-lists task only, by condition.
ggplot(ipus |> filter(task=="Lists"), aes(turnOverall, f0medz))+
geom_point()+
geom_smooth(method="lm")+
facet_wrap(~condition)+
ggtitle("Question lists (1st part of experiment)")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 120 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 120 rows containing missing values (`geom_point()`).
# ggsave(paste0(here::here(), "/figures/forMeeting/medianCondition.png"))
# Baseline f0 median model restricted to the Lists task.
summary(m <- lmer(f0medz ~ turnOverall + (1|speaker), ipus |> filter(task=="Lists")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0medz ~ turnOverall + (1 | speaker)
## Data: filter(ipus, task == "Lists")
##
## REML criterion at convergence: 11311
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.8741 -0.6573 -0.1154 0.5114 4.1135
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.07897 0.281
## Residual 0.99991 1.000
## Number of obs: 3952, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -6.770e-02 5.254e-02 5.164e+01 -1.289 0.203
## turnOverall 1.173e-03 1.289e-03 3.823e+03 0.911 0.363
##
## Correlation of Fixed Effects:
## (Intr)
## turnOverall -0.379
# Condition-interaction f0 median model, Lists task only.
summary(m1 <- lmer(f0medz ~ turnOverall : condition + (1|speaker), ipus |> filter(task=="Lists")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0medz ~ turnOverall:condition + (1 | speaker)
## Data: filter(ipus, task == "Lists")
##
## REML criterion at convergence: 11320.2
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.8705 -0.6584 -0.1149 0.5190 4.0943
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.07579 0.2753
## Residual 1.00024 1.0001
## Number of obs: 3952, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) -6.356e-02 5.190e-02 5.136e+01 -1.225
## turnOverall:conditionclose -2.206e-04 1.884e-03 1.902e+03 -0.117
## turnOverall:conditionimpersonal 2.241e-03 1.647e-03 2.413e+03 1.361
## Pr(>|t|)
## (Intercept) 0.226
## turnOverall:conditionclose 0.907
## turnOverall:conditionimpersonal 0.174
##
## Correlation of Fixed Effects:
## (Intr) trnOvrll:cndtnc
## trnOvrll:cndtnc -0.321
## trnOvrll:cndtnm -0.250 0.080
# Model comparison for the Lists task: baseline vs. condition interaction.
anova(m, m1)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Lists")
## Models:
## m: f0medz ~ turnOverall + (1 | speaker)
## m1: f0medz ~ turnOverall:condition + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 11303 11328 -5647.7 11295
## m1 5 11304 11336 -5647.1 11294 1.0686 1 0.3013
# Residual diagnostics (2x2 panel) for the Lists-task f0 median baseline model:
# histogram, normal Q-Q plot, residuals vs. fitted.
par(mfrow = c(2, 2))
hist(resid(m))
qqnorm(resid(m))
qqline(resid(m))
plot(fitted(m), resid(m))
# f0 median across turns in the Diapix task only, by condition.
ggplot(ipus |> filter(task=="Diapix"), aes(turnOverall, f0medz))+
geom_point()+
geom_smooth(method="lm")+
facet_wrap(~condition)+
ggtitle("Diapix (2nd part of experiment)")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 584 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 584 rows containing missing values (`geom_point()`).
# Baseline f0 median model restricted to the Diapix task.
summary(m <- lmer(f0medz ~ turnOverall + (1|speaker), ipus |> filter(task=="Diapix")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0medz ~ turnOverall + (1 | speaker)
## Data: filter(ipus, task == "Diapix")
##
## REML criterion at convergence: 41322.1
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.4302 -0.6501 -0.0791 0.5608 4.4185
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.006863 0.08284
## Residual 0.958730 0.97915
## Number of obs: 14756, groups: speaker, 36
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -1.227e-01 2.795e-02 2.602e+02 -4.392 1.64e-05 ***
## turnOverall 1.388e-03 2.481e-04 7.223e+03 5.595 2.29e-08 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## turnOverall -0.819
# Condition-interaction f0 median model, Diapix task only.
summary(m1 <- lmer(f0medz ~ turnOverall : condition + (1|speaker), ipus |> filter(task=="Diapix"))) # preferred model by AIC in the anova below, even though the effect is significant and positive for both conditions
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0medz ~ turnOverall:condition + (1 | speaker)
## Data: filter(ipus, task == "Diapix")
##
## REML criterion at convergence: 41331.3
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.4261 -0.6511 -0.0794 0.5620 4.4311
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.005483 0.07405
## Residual 0.958811 0.97919
## Number of obs: 14756, groups: speaker, 36
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) -1.260e-01 2.725e-02 3.133e+02 -4.623
## turnOverall:conditionclose 1.781e-03 2.996e-04 1.048e+03 5.947
## turnOverall:conditionimpersonal 1.122e-03 2.672e-04 6.240e+02 4.200
## Pr(>|t|)
## (Intercept) 5.52e-06 ***
## turnOverall:conditionclose 3.72e-09 ***
## turnOverall:conditionimpersonal 3.06e-05 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) trnOvrll:cndtnc
## trnOvrll:cndtnc -0.733
## trnOvrll:cndtnm -0.744 0.546
# Model comparison for the Diapix task: the condition-interaction model wins.
anova(m, m1)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Diapix")
## Models:
## m: f0medz ~ turnOverall + (1 | speaker)
## m1: f0medz ~ turnOverall:condition + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 41309 41339 -20651 41301
## m1 5 41305 41343 -20648 41295 5.6208 1 0.01775 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Residual diagnostics (2x2 panel) for the preferred Diapix model (m1):
# histogram, normal Q-Q plot, residuals vs. fitted.
par(mfrow = c(2, 2))
hist(resid(m1))
qqnorm(resid(m1))
qqline(resid(m1))
plot(fitted(m1), resid(m1))
Checking if f0 median changes across time in interaction with questionnaire scores: Even though many of the models are significant, their AIC is not preferred over the model without the questionnaire scores.
# entire experiment
# Baseline f0 median model, refit for comparison against the
# questionnaire-score interaction models below (m1-m4).
summary(m <- lmer(f0medz ~ turnOverall + (1|speaker), ipus))
## boundary (singular) fit: see help('isSingular')
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0medz ~ turnOverall + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 52933
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.3004 -0.6565 -0.1051 0.5512 4.5987
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 3.752e-31 6.125e-16
## Residual 9.904e-01 9.952e-01
## Number of obs: 18708, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -6.820e-02 1.461e-02 1.871e+04 -4.667 3.08e-06 ***
## turnOverall 8.849e-04 1.644e-04 1.871e+04 5.382 7.47e-08 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## turnOverall -0.867
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# Model 1: f0 median slope over turns scaled by closeness score.
summary(m1 <- lmer(f0medz ~ turnOverall : closeness + (1|speaker), ipus))
## boundary (singular) fit: see help('isSingular')
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0medz ~ turnOverall:closeness + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 52942.2
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.2973 -0.6552 -0.0977 0.5479 4.5778
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 3.753e-31 6.126e-16
## Residual 9.907e-01 9.954e-01
## Number of obs: 18708, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -4.217e-02 1.138e-02 1.871e+04 -3.706 0.000211 ***
## turnOverall:closeness 1.441e-04 2.989e-05 1.871e+04 4.821 1.44e-06 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:cl -0.769
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# Model 2: f0 median slope over turns scaled by similarity score.
summary(m2 <- lmer(f0medz ~ turnOverall : similarity + (1|speaker), ipus))
## boundary (singular) fit: see help('isSingular')
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0medz ~ turnOverall:similarity + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 52954.8
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.2906 -0.6519 -0.1066 0.5458 4.5780
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 3.756e-31 6.128e-16
## Residual 9.914e-01 9.957e-01
## Number of obs: 18708, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -3.603e-02 1.312e-02 1.871e+04 -2.746 0.006044 **
## turnOverall:similarity 8.371e-05 2.536e-05 1.871e+04 3.300 0.000968 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:sm -0.832
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# Same interaction-only drift model, moderated by the "likeability" score.
summary(m3 <- lmer(f0medz ~ turnOverall : likeability + (1|speaker), ipus))
## boundary (singular) fit: see help('isSingular')
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0medz ~ turnOverall:likeability + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 52944.6
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.2836 -0.6563 -0.1004 0.5456 4.5898
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 3.754e-31 6.127e-16
## Residual 9.908e-01 9.954e-01
## Number of obs: 18708, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -5.491e-02 1.393e-02 1.871e+04 -3.942 8.1e-05 ***
## turnOverall:likeability 1.052e-04 2.276e-05 1.871e+04 4.624 3.8e-06 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:lk -0.853
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# Same interaction-only drift model, moderated by the "becomeFriends" score.
summary(m4 <- lmer(f0medz ~ turnOverall : becomeFriends + (1|speaker), ipus))
## boundary (singular) fit: see help('isSingular')
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0medz ~ turnOverall:becomeFriends + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 52950.8
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.2867 -0.6518 -0.1022 0.5438 4.5826
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 3.755e-31 6.128e-16
## Residual 9.912e-01 9.956e-01
## Number of obs: 18708, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -4.200e-02 1.305e-02 1.871e+04 -3.218 0.001294 **
## turnOverall:becomeFriends 9.237e-05 2.383e-05 1.871e+04 3.877 0.000106 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:bF -0.830
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# NOTE(review): m and m1 are non-nested and have equal npar, so the LRT row
# shows Chisq = 0 on Df = 0 — only the AIC/BIC columns are informative here.
anova(m, m1)
## refitting model(s) with ML (instead of REML)
## Data: ipus
## Models:
## m: f0medz ~ turnOverall + (1 | speaker)
## m1: f0medz ~ turnOverall:closeness + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 52917 52949 -26455 52909
## m1 4 52923 52954 -26458 52915 0 0
# Non-nested, equal-npar comparison: ignore the Df=0 LRT, compare AIC/BIC.
anova(m, m2)
## refitting model(s) with ML (instead of REML)
## Data: ipus
## Models:
## m: f0medz ~ turnOverall + (1 | speaker)
## m2: f0medz ~ turnOverall:similarity + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 52917 52949 -26455 52909
## m2 4 52935 52967 -26464 52927 0 0
# Non-nested, equal-npar comparison: ignore the Df=0 LRT, compare AIC/BIC.
anova(m, m3)
## refitting model(s) with ML (instead of REML)
## Data: ipus
## Models:
## m: f0medz ~ turnOverall + (1 | speaker)
## m3: f0medz ~ turnOverall:likeability + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 52917 52949 -26455 52909
## m3 4 52925 52956 -26459 52917 0 0
# Non-nested, equal-npar comparison: ignore the Df=0 LRT, compare AIC/BIC.
anova(m, m4)
## refitting model(s) with ML (instead of REML)
## Data: ipus
## Models:
## m: f0medz ~ turnOverall + (1 | speaker)
## m4: f0medz ~ turnOverall:becomeFriends + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 52917 52949 -26455 52909
## m4 4 52931 52963 -26462 52923 0 0
# lists
# Lists task only: baseline f0-median drift over turns, speaker intercepts.
summary(m <- lmer(f0medz ~ turnOverall + (1|speaker), ipus |> filter(task=="Lists")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0medz ~ turnOverall + (1 | speaker)
## Data: filter(ipus, task == "Lists")
##
## REML criterion at convergence: 11311
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.8741 -0.6573 -0.1154 0.5114 4.1135
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.07897 0.281
## Residual 0.99991 1.000
## Number of obs: 3952, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -6.770e-02 5.254e-02 5.164e+01 -1.289 0.203
## turnOverall 1.173e-03 1.289e-03 3.823e+03 0.911 0.363
##
## Correlation of Fixed Effects:
## (Intr)
## turnOverall -0.379
# Lists task: turn slope moderated by closeness (interaction-only).
summary(m1 <- lmer(f0medz ~ turnOverall : closeness + (1|speaker), ipus |> filter(task=="Lists")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0medz ~ turnOverall:closeness + (1 | speaker)
## Data: filter(ipus, task == "Lists")
##
## REML criterion at convergence: 11313.1
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.8753 -0.6555 -0.1137 0.5105 4.1143
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.07968 0.2823
## Residual 0.99964 0.9998
## Number of obs: 3952, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -7.239e-02 5.204e-02 4.881e+01 -1.391 0.171
## turnOverall:closeness 3.930e-04 3.112e-04 3.022e+03 1.263 0.207
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:cl -0.347
# Lists task: turn slope moderated by similarity (interaction-only).
summary(m2 <- lmer(f0medz ~ turnOverall : similarity + (1|speaker), ipus |> filter(task=="Lists")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0medz ~ turnOverall:similarity + (1 | speaker)
## Data: filter(ipus, task == "Lists")
##
## REML criterion at convergence: 11314.3
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.8733 -0.6560 -0.1194 0.5120 4.1158
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.07749 0.2784
## Residual 1.00002 1.0000
## Number of obs: 3952, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -6.903e-02 5.187e-02 4.981e+01 -1.331 0.189
## turnOverall:similarity 2.232e-04 2.189e-04 3.502e+03 1.020 0.308
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:sm -0.369
# Lists task: turn slope moderated by likeability (interaction-only).
summary(m3 <- lmer(f0medz ~ turnOverall : likeability + (1|speaker), ipus |> filter(task=="Lists")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0medz ~ turnOverall:likeability + (1 | speaker)
## Data: filter(ipus, task == "Lists")
##
## REML criterion at convergence: 11314.2
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.8737 -0.6547 -0.1164 0.5127 4.1201
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.07806 0.2794
## Residual 0.99983 0.9999
## Number of obs: 3952, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -7.399e-02 5.230e-02 5.156e+01 -1.415 0.163
## turnOverall:likeability 2.312e-04 1.881e-04 3.700e+03 1.229 0.219
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:lk -0.380
# Lists task: turn slope moderated by becomeFriends (interaction-only).
summary(m4 <- lmer(f0medz ~ turnOverall : becomeFriends + (1|speaker), ipus |> filter(task=="Lists")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0medz ~ turnOverall:becomeFriends + (1 | speaker)
## Data: filter(ipus, task == "Lists")
##
## REML criterion at convergence: 11314.1
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.8736 -0.6547 -0.1180 0.5149 4.1146
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.07796 0.2792
## Residual 0.99987 0.9999
## Number of obs: 3952, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -7.210e-02 5.195e-02 5.013e+01 -1.388 0.171
## turnOverall:becomeFriends 2.437e-04 2.052e-04 3.550e+03 1.187 0.235
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:bF -0.366
# Non-nested, equal-npar comparison: ignore the Df=0 LRT, compare AIC/BIC.
anova(m, m1)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Lists")
## Models:
## m: f0medz ~ turnOverall + (1 | speaker)
## m1: f0medz ~ turnOverall:closeness + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 11303 11328 -5647.7 11295
## m1 4 11302 11328 -5647.3 11294 0.7622 0
# Non-nested, equal-npar comparison: ignore the Df=0 LRT, compare AIC/BIC.
anova(m, m2)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Lists")
## Models:
## m: f0medz ~ turnOverall + (1 | speaker)
## m2: f0medz ~ turnOverall:similarity + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 11303 11328 -5647.7 11295
## m2 4 11303 11328 -5647.5 11295 0.2123 0
# Non-nested, equal-npar comparison: ignore the Df=0 LRT, compare AIC/BIC.
anova(m, m3)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Lists")
## Models:
## m: f0medz ~ turnOverall + (1 | speaker)
## m3: f0medz ~ turnOverall:likeability + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 11303 11328 -5647.7 11295
## m3 4 11303 11328 -5647.3 11295 0.6837 0
# Non-nested, equal-npar comparison: ignore the Df=0 LRT, compare AIC/BIC.
anova(m, m4)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Lists")
## Models:
## m: f0medz ~ turnOverall + (1 | speaker)
## m4: f0medz ~ turnOverall:becomeFriends + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 11303 11328 -5647.7 11295
## m4 4 11303 11328 -5647.4 11295 0.5828 0
# diapix
# Diapix task only: baseline f0-median drift over turns, speaker intercepts.
# (36 speakers here vs 38 overall — one dyad has missing Diapix files.)
summary(m <- lmer(f0medz ~ turnOverall + (1|speaker), ipus |> filter(task=="Diapix")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0medz ~ turnOverall + (1 | speaker)
## Data: filter(ipus, task == "Diapix")
##
## REML criterion at convergence: 41322.1
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.4302 -0.6501 -0.0791 0.5608 4.4185
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.006863 0.08284
## Residual 0.958730 0.97915
## Number of obs: 14756, groups: speaker, 36
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -1.227e-01 2.795e-02 2.602e+02 -4.392 1.64e-05 ***
## turnOverall 1.388e-03 2.481e-04 7.223e+03 5.595 2.29e-08 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## turnOverall -0.819
# Diapix task: turn slope moderated by closeness (interaction-only).
summary(m1 <- lmer(f0medz ~ turnOverall : closeness + (1|speaker), ipus |> filter(task=="Diapix")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0medz ~ turnOverall:closeness + (1 | speaker)
## Data: filter(ipus, task == "Diapix")
##
## REML criterion at convergence: 41330.7
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.4427 -0.6518 -0.0762 0.5612 4.4271
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.007702 0.08776
## Residual 0.958875 0.97922
## Number of obs: 14756, groups: speaker, 36
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -7.919e-02 2.346e-02 7.470e+01 -3.376 0.00117 **
## turnOverall:closeness 2.475e-04 4.816e-05 3.459e+02 5.139 4.63e-07 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:cl -0.701
# Diapix task: turn slope moderated by similarity (interaction-only).
summary(m2 <- lmer(f0medz ~ turnOverall : similarity + (1|speaker), ipus |> filter(task=="Diapix")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0medz ~ turnOverall:similarity + (1 | speaker)
## Data: filter(ipus, task == "Diapix")
##
## REML criterion at convergence: 41336.9
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.4450 -0.6505 -0.0791 0.5614 4.4198
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.009388 0.09689
## Residual 0.958905 0.97924
## Number of obs: 14756, groups: speaker, 36
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -9.018e-02 2.730e-02 1.157e+02 -3.304 0.00127 **
## turnOverall:similarity 1.857e-04 3.982e-05 1.417e+03 4.662 3.42e-06 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:sm -0.749
# Diapix task: turn slope moderated by likeability (interaction-only).
summary(m3 <- lmer(f0medz ~ turnOverall : likeability + (1|speaker), ipus |> filter(task=="Diapix")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0medz ~ turnOverall:likeability + (1 | speaker)
## Data: filter(ipus, task == "Diapix")
##
## REML criterion at convergence: 41332.4
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.4366 -0.6522 -0.0789 0.5586 4.4240
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.007611 0.08724
## Residual 0.958962 0.97927
## Number of obs: 14756, groups: speaker, 36
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -1.034e-01 2.727e-02 1.790e+02 -3.790 0.000206 ***
## turnOverall:likeability 1.742e-04 3.462e-05 2.983e+03 5.033 5.12e-07 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:lk -0.792
# Diapix task: turn slope moderated by becomeFriends (interaction-only).
summary(m4 <- lmer(f0medz ~ turnOverall : becomeFriends + (1|speaker), ipus |> filter(task=="Diapix")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0medz ~ turnOverall:becomeFriends + (1 | speaker)
## Data: filter(ipus, task == "Diapix")
##
## REML criterion at convergence: 41337
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.4418 -0.6523 -0.0790 0.5623 4.4189
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.008199 0.09055
## Residual 0.959144 0.97936
## Number of obs: 14756, groups: speaker, 36
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -8.766e-02 2.659e-02 1.232e+02 -3.297 0.00128 **
## turnOverall:becomeFriends 1.716e-04 3.755e-05 1.088e+03 4.570 5.42e-06 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:bF -0.765
# Non-nested, equal-npar comparison: ignore the Df=0 LRT, compare AIC/BIC.
anova(m, m1)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Diapix")
## Models:
## m: f0medz ~ turnOverall + (1 | speaker)
## m1: f0medz ~ turnOverall:closeness + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 41309 41339 -20651 41301
## m1 4 41314 41345 -20653 41306 0 0
# Non-nested, equal-npar comparison: ignore the Df=0 LRT, compare AIC/BIC.
anova(m, m2)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Diapix")
## Models:
## m: f0medz ~ turnOverall + (1 | speaker)
## m2: f0medz ~ turnOverall:similarity + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 41309 41339 -20651 41301
## m2 4 41320 41351 -20656 41312 0 0
# Non-nested, equal-npar comparison: ignore the Df=0 LRT, compare AIC/BIC.
anova(m, m3)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Diapix")
## Models:
## m: f0medz ~ turnOverall + (1 | speaker)
## m3: f0medz ~ turnOverall:likeability + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 41309 41339 -20651 41301
## m3 4 41315 41346 -20654 41307 0 0
# Non-nested, equal-npar comparison: ignore the Df=0 LRT, compare AIC/BIC.
anova(m, m4)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Diapix")
## Models:
## m: f0medz ~ turnOverall + (1 | speaker)
## m4: f0medz ~ turnOverall:becomeFriends + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 41309 41339 -20651 41301
## m4 4 41320 41351 -20656 41312 0 0
# Scatter of f0 max (z) against overall turn number with a per-condition
# linear fit, faceted by condition.
ggplot(ipus, aes(turnOverall, f0maxz))+
geom_point()+
geom_smooth(method="lm")+
facet_wrap(~condition)+
ggtitle("Entire experiment")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 404 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 404 rows containing missing values (`geom_point()`).
# Baseline f0-max drift over turns, entire experiment. Speaker intercept
# variance collapses to ~0 (singular fit) — consistent with f0maxz being
# standardized within speaker, presumably; verify against preprocessing.
summary(m <- lmer(f0maxz ~ turnOverall + (1|speaker), ipus))
## boundary (singular) fit: see help('isSingular')
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0maxz ~ turnOverall + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 53607.4
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.9352 -0.6932 -0.1337 0.5525 5.0215
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 1.047e-31 3.236e-16
## Residual 9.814e-01 9.907e-01
## Number of obs: 19008, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 1.802e-01 1.444e-02 1.901e+04 12.48 <2e-16 ***
## turnOverall -2.340e-03 1.626e-04 1.901e+04 -14.39 <2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## turnOverall -0.867
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# Separate turn slope per condition (close vs impersonal), no condition
# main effect.
summary(m1 <- lmer(f0maxz ~ turnOverall : condition + (1|speaker), ipus))
## boundary (singular) fit: see help('isSingular')
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0maxz ~ turnOverall:condition + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 53622.7
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.9315 -0.6923 -0.1330 0.5525 5.0227
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 1.047e-31 3.236e-16
## Residual 9.814e-01 9.907e-01
## Number of obs: 19008, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) 1.791e-01 1.458e-02 1.900e+04 12.29
## turnOverall:conditionclose -2.277e-03 1.992e-04 1.900e+04 -11.43
## turnOverall:conditionimpersonal -2.367e-03 1.702e-04 1.900e+04 -13.91
## Pr(>|t|)
## (Intercept) <2e-16 ***
## turnOverall:conditionclose <2e-16 ***
## turnOverall:conditionimpersonal <2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) trnOvrll:cndtnc
## trnOvrll:cndtnc -0.781
## trnOvrll:cndtnm -0.780 0.609
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# Here m (one common slope) is nested in m1 (per-condition slopes), so the
# Df=1 LRT is a valid test of slope differences between conditions.
anova(m, m1)
## refitting model(s) with ML (instead of REML)
## Data: ipus
## Models:
## m: f0maxz ~ turnOverall + (1 | speaker)
## m1: f0maxz ~ turnOverall:condition + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 53592 53623 -26792 53584
## m1 5 53593 53633 -26792 53583 0.2992 1 0.5844
# Residual diagnostics for the current model `m` on a 2x2 panel grid:
# histogram, normal Q-Q plot, and residuals vs fitted values.
par(mfrow = c(2, 2))
res <- resid(m)
hist(res)
qqnorm(res)
qqline(res)
plot(fitted(m), res)
# Same f0-max-by-turn scatter, restricted to the Lists task.
ggplot(ipus |> filter(task=="Lists"), aes(turnOverall, f0maxz))+
geom_point()+
geom_smooth(method="lm")+
facet_wrap(~condition)+
ggtitle("Question lists (1st part of experiment)")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 52 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 52 rows containing missing values (`geom_point()`).
# ggsave(paste0(here::here(), "/figures/forMeeting/maxCondition.png"))
# Lists task only: baseline f0-max drift over turns.
summary(m <- lmer(f0maxz ~ turnOverall + (1|speaker), ipus |> filter(task=="Lists")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0maxz ~ turnOverall + (1 | speaker)
## Data: filter(ipus, task == "Lists")
##
## REML criterion at convergence: 12013.3
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.5050 -0.6673 -0.1654 0.4841 4.1539
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.114 0.3376
## Residual 1.133 1.0643
## Number of obs: 4020, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 2.960e-01 6.135e-02 4.685e+01 4.825 1.53e-05 ***
## turnOverall -3.754e-04 1.367e-03 3.947e+03 -0.275 0.784
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## turnOverall -0.343
# Lists task: separate turn slope per condition.
summary(m1 <- lmer(f0maxz ~ turnOverall : condition + (1|speaker), ipus |> filter(task=="Lists")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0maxz ~ turnOverall:condition + (1 | speaker)
## Data: filter(ipus, task == "Lists")
##
## REML criterion at convergence: 12023.2
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.5134 -0.6714 -0.1610 0.4829 4.1522
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.1179 0.3434
## Residual 1.1325 1.0642
## Number of obs: 4020, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) 2.941e-01 6.234e-02 4.361e+01 4.718
## turnOverall:conditionclose 2.653e-04 2.012e-03 2.370e+03 0.132
## turnOverall:conditionimpersonal -8.620e-04 1.768e-03 2.869e+03 -0.488
## Pr(>|t|)
## (Intercept) 2.47e-05 ***
## turnOverall:conditionclose 0.895
## turnOverall:conditionimpersonal 0.626
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) trnOvrll:cndtnc
## trnOvrll:cndtnc -0.281
## trnOvrll:cndtnm -0.218 0.061
# Nested comparison (common slope vs per-condition slopes): valid Df=1 LRT.
anova(m, m1)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Lists")
## Models:
## m: f0maxz ~ turnOverall + (1 | speaker)
## m1: f0maxz ~ turnOverall:condition + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 12006 12031 -5999 11998
## m1 5 12008 12039 -5999 11998 0.1454 1 0.7029
# Residual diagnostics for the current model `m` on a 2x2 panel grid:
# histogram, normal Q-Q plot, and residuals vs fitted values.
par(mfrow = c(2, 2))
res <- resid(m)
hist(res)
qqnorm(res)
qqline(res)
plot(fitted(m), res)
# Same f0-max-by-turn scatter, restricted to the Diapix task.
ggplot(ipus |> filter(task=="Diapix"), aes(turnOverall, f0maxz))+
geom_point()+
geom_smooth(method="lm")+
facet_wrap(~condition)+
ggtitle("Diapix (2nd part of experiment)")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 352 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 352 rows containing missing values (`geom_point()`).
# Diapix task only: baseline f0-max drift over turns.
summary(m <- lmer(f0maxz ~ turnOverall + (1|speaker), ipus |> filter(task=="Diapix")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0maxz ~ turnOverall + (1 | speaker)
## Data: filter(ipus, task == "Diapix")
##
## REML criterion at convergence: 40945.8
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.1123 -0.6992 -0.1036 0.5733 4.5351
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.00703 0.08384
## Residual 0.89518 0.94614
## Number of obs: 14988, groups: speaker, 36
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -5.459e-02 2.723e-02 2.438e+02 -2.005 0.0461 *
## turnOverall -2.519e-04 2.388e-04 7.881e+03 -1.055 0.2914
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## turnOverall -0.809
# Diapix task: separate turn slope per condition.
summary(m1 <- lmer(f0maxz ~ turnOverall : condition + (1|speaker), ipus |> filter(task=="Diapix")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0maxz ~ turnOverall:condition + (1 | speaker)
## Data: filter(ipus, task == "Diapix")
##
## REML criterion at convergence: 40960
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.1095 -0.6986 -0.1022 0.5755 4.5276
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.006649 0.08154
## Residual 0.895305 0.94621
## Number of obs: 14988, groups: speaker, 36
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) -5.571e-02 2.707e-02 2.323e+02 -2.058
## turnOverall:conditionclose -1.380e-04 2.926e-04 9.567e+02 -0.472
## turnOverall:conditionimpersonal -3.245e-04 2.626e-04 5.720e+02 -1.236
## Pr(>|t|)
## (Intercept) 0.0407 *
## turnOverall:conditionclose 0.6373
## turnOverall:conditionimpersonal 0.2170
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) trnOvrll:cndtnc
## trnOvrll:cndtnc -0.699
## trnOvrll:cndtnm -0.713 0.498
# Nested comparison (common slope vs per-condition slopes): valid Df=1 LRT.
anova(m, m1)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Diapix")
## Models:
## m: f0maxz ~ turnOverall + (1 | speaker)
## m1: f0maxz ~ turnOverall:condition + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 40933 40963 -20462 40925
## m1 5 40934 40972 -20462 40924 0.492 1 0.483
# Residual diagnostics for the current model `m` on a 2x2 panel grid:
# histogram, normal Q-Q plot, and residuals vs fitted values.
par(mfrow = c(2, 2))
res <- resid(m)
hist(res)
qqnorm(res)
qqline(res)
plot(fitted(m), res)
Checking if f0 max changes across time in interaction with questionnaire scores: even though many of the models are significant, their AIC is not preferred over the model without the questionnaire scores. One exception: f0 max seems to have decreased more for people who felt closer to their partner.
# entire experiment
# Baseline f0-max drift over turns (refit so `m` is the reference model for
# the questionnaire-score comparisons that follow).
summary(m <- lmer(f0maxz ~ turnOverall + (1|speaker), ipus))
## boundary (singular) fit: see help('isSingular')
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0maxz ~ turnOverall + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 53607.4
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.9352 -0.6932 -0.1337 0.5525 5.0215
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 1.047e-31 3.236e-16
## Residual 9.814e-01 9.907e-01
## Number of obs: 19008, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 1.802e-01 1.444e-02 1.901e+04 12.48 <2e-16 ***
## turnOverall -2.340e-03 1.626e-04 1.901e+04 -14.39 <2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## turnOverall -0.867
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# f0-max turn slope moderated by closeness (interaction-only).
summary(m1 <- lmer(f0maxz ~ turnOverall : closeness + (1|speaker), ipus))
## boundary (singular) fit: see help('isSingular')
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0maxz ~ turnOverall:closeness + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 53751.6
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.9999 -0.6909 -0.1359 0.5673 5.1069
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 1.055e-31 3.248e-16
## Residual 9.887e-01 9.943e-01
## Number of obs: 19008, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 7.001e-02 1.127e-02 1.901e+04 6.21 5.40e-10 ***
## turnOverall:closeness -2.396e-04 2.965e-05 1.901e+04 -8.08 6.85e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:cl -0.769
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# f0-max turn slope moderated by similarity (interaction-only).
# Note the recorded convergence warning (negative eigenvalue) below.
summary(m2 <- lmer(f0maxz ~ turnOverall : similarity + (1|speaker), ipus))
## boundary (singular) fit: see help('isSingular')
## Warning: Model failed to converge with 1 negative eigenvalue: -6.2e+03
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0maxz ~ turnOverall:similarity + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 53662.7
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.0109 -0.6883 -0.1325 0.5526 5.0581
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 1.050e-31 3.241e-16
## Residual 9.841e-01 9.920e-01
## Number of obs: 19008, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 1.343e-01 1.297e-02 1.901e+04 10.36 <2e-16 ***
## turnOverall:similarity -3.123e-04 2.508e-05 1.901e+04 -12.45 <2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:sm -0.832
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# f0-max turn slope moderated by likeability (interaction-only).
summary(m3 <- lmer(f0maxz ~ turnOverall : likeability + (1|speaker), ipus))
## boundary (singular) fit: see help('isSingular')
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0maxz ~ turnOverall:likeability + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 53642
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.9843 -0.6939 -0.1352 0.5593 5.0404
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 1.049e-31 3.239e-16
## Residual 9.830e-01 9.915e-01
## Number of obs: 19008, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 1.560e-01 1.378e-02 1.901e+04 11.32 <2e-16 ***
## turnOverall:likeability -2.989e-04 2.252e-05 1.901e+04 -13.27 <2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:lk -0.853
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# f0-max turn slope moderated by becomeFriends (interaction-only).
# Note the recorded convergence warning (negative eigenvalue) below.
summary(m4 <- lmer(f0maxz ~ turnOverall : becomeFriends + (1|speaker), ipus))
## boundary (singular) fit: see help('isSingular')
## Warning: Model failed to converge with 1 negative eigenvalue: -5.8e+03
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0maxz ~ turnOverall:becomeFriends + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 53674.1
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.9605 -0.6875 -0.1378 0.5549 5.0614
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 1.051e-31 3.242e-16
## Residual 9.847e-01 9.923e-01
## Number of obs: 19008, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 1.285e-01 1.291e-02 1.901e+04 9.954 <2e-16 ***
## turnOverall:becomeFriends -2.830e-04 2.360e-05 1.901e+04 -11.988 <2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:bF -0.830
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# Non-nested, equal-npar comparison: ignore the Df=0 LRT, compare AIC/BIC.
anova(m, m1)
## refitting model(s) with ML (instead of REML)
## Data: ipus
## Models:
## m: f0maxz ~ turnOverall + (1 | speaker)
## m1: f0maxz ~ turnOverall:closeness + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 53592 53623 -26792 53584
## m1 4 53733 53764 -26862 53725 0 0
# Non-nested, equal-npar comparison: ignore the Df=0 LRT, compare AIC/BIC.
anova(m, m2)
## refitting model(s) with ML (instead of REML)
## Data: ipus
## Models:
## m: f0maxz ~ turnOverall + (1 | speaker)
## m2: f0maxz ~ turnOverall:similarity + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 53592 53623 -26792 53584
## m2 4 53643 53675 -26818 53635 0 0
# Non-nested, equal-npar comparison: ignore the Df=0 LRT, compare AIC/BIC.
anova(m, m3)
## refitting model(s) with ML (instead of REML)
## Data: ipus
## Models:
## m: f0maxz ~ turnOverall + (1 | speaker)
## m3: f0maxz ~ turnOverall:likeability + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 53592 53623 -26792 53584
## m3 4 53622 53654 -26807 53614 0 0
# Non-nested, equal-npar comparison: ignore the Df=0 LRT, compare AIC/BIC.
anova(m, m4)
## refitting model(s) with ML (instead of REML)
## Data: ipus
## Models:
## m: f0maxz ~ turnOverall + (1 | speaker)
## m4: f0maxz ~ turnOverall:becomeFriends + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 53592 53623 -26792 53584
## m4 4 53655 53686 -26823 53647 0 0
# lists
# Lists task only: baseline f0-max drift over turns (reference model for the
# questionnaire-score comparisons below).
summary(m <- lmer(f0maxz ~ turnOverall + (1|speaker), ipus |> filter(task=="Lists")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0maxz ~ turnOverall + (1 | speaker)
## Data: filter(ipus, task == "Lists")
##
## REML criterion at convergence: 12013.3
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.5050 -0.6673 -0.1654 0.4841 4.1539
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.114 0.3376
## Residual 1.133 1.0643
## Number of obs: 4020, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 2.960e-01 6.135e-02 4.685e+01 4.825 1.53e-05 ***
## turnOverall -3.754e-04 1.367e-03 3.947e+03 -0.275 0.784
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## turnOverall -0.343
# Turn index enters only via its interaction with the closeness score (no main effects)
summary(m1 <- lmer(f0maxz ~ turnOverall : closeness + (1|speaker), ipus |> filter(task=="Lists")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0maxz ~ turnOverall:closeness + (1 | speaker)
## Data: filter(ipus, task == "Lists")
##
## REML criterion at convergence: 12015.6
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.4906 -0.6608 -0.1616 0.4832 4.1604
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.1153 0.3396
## Residual 1.1324 1.0641
## Number of obs: 4020, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 2.760e-01 6.101e-02 4.448e+01 4.524 4.48e-05 ***
## turnOverall:closeness 2.453e-04 3.315e-04 3.348e+03 0.740 0.459
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:cl -0.314
# Interaction-only model with the similarity questionnaire score
summary(m2 <- lmer(f0maxz ~ turnOverall : similarity + (1|speaker), ipus |> filter(task=="Lists")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0maxz ~ turnOverall:similarity + (1 | speaker)
## Data: filter(ipus, task == "Lists")
##
## REML criterion at convergence: 12016.8
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.5083 -0.6670 -0.1668 0.4835 4.1540
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.114 0.3376
## Residual 1.133 1.0642
## Number of obs: 4020, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 2.974e-01 6.110e-02 4.592e+01 4.868 1.37e-05 ***
## turnOverall:similarity -8.276e-05 2.330e-04 3.744e+03 -0.355 0.722
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:sm -0.332
# Interaction-only model with the likeability questionnaire score
summary(m3 <- lmer(f0maxz ~ turnOverall : likeability + (1|speaker), ipus |> filter(task=="Lists")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0maxz ~ turnOverall:likeability + (1 | speaker)
## Data: filter(ipus, task == "Lists")
##
## REML criterion at convergence: 12017.2
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.4998 -0.6668 -0.1632 0.4820 4.1558
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.114 0.3376
## Residual 1.133 1.0643
## Number of obs: 4020, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 2.912e-01 6.135e-02 4.676e+01 4.747 1.98e-05 ***
## turnOverall:likeability -9.721e-06 1.997e-04 3.870e+03 -0.049 0.961
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:lk -0.343
# Interaction-only model with the become-friends questionnaire score
summary(m4 <- lmer(f0maxz ~ turnOverall : becomeFriends + (1|speaker), ipus |> filter(task=="Lists")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0maxz ~ turnOverall:becomeFriends + (1 | speaker)
## Data: filter(ipus, task == "Lists")
##
## REML criterion at convergence: 12016.6
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.5158 -0.6736 -0.1674 0.4821 4.1516
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.1137 0.3371
## Residual 1.1326 1.0642
## Number of obs: 4020, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 3.037e-01 6.097e-02 4.578e+01 4.982 9.44e-06 ***
## turnOverall:becomeFriends -1.466e-04 2.181e-04 3.766e+03 -0.672 0.502
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:bF -0.330
# LRT (ML refit): Lists baseline vs. closeness interaction — equal npar, Df = 0
anova(m, m1)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Lists")
## Models:
## m: f0maxz ~ turnOverall + (1 | speaker)
## m1: f0maxz ~ turnOverall:closeness + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 12006 12031 -5999.0 11998
## m1 4 12006 12031 -5998.8 11998 0.4604 0
# LRT (ML refit): Lists baseline vs. similarity interaction — equal npar, Df = 0
anova(m, m2)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Lists")
## Models:
## m: f0maxz ~ turnOverall + (1 | speaker)
## m2: f0maxz ~ turnOverall:similarity + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 12006 12031 -5999 11998
## m2 4 12006 12031 -5999 11998 0.0509 0
# LRT (ML refit): Lists baseline vs. likeability interaction — equal npar, Df = 0
anova(m, m3)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Lists")
## Models:
## m: f0maxz ~ turnOverall + (1 | speaker)
## m3: f0maxz ~ turnOverall:likeability + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 12006 12031 -5999.0 11998
## m3 4 12006 12031 -5999.1 11998 0 0
# LRT (ML refit): Lists baseline vs. become-friends interaction — equal npar, Df = 0
anova(m, m4)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Lists")
## Models:
## m: f0maxz ~ turnOverall + (1 | speaker)
## m4: f0maxz ~ turnOverall:becomeFriends + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 12006 12031 -5999.0 11998
## m4 4 12006 12031 -5998.8 11998 0.3789 0
# diapix
# Baseline for the Diapix task: f0maxz ~ turn index, random intercept per speaker
summary(m <- lmer(f0maxz ~ turnOverall + (1|speaker), ipus |> filter(task=="Diapix")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0maxz ~ turnOverall + (1 | speaker)
## Data: filter(ipus, task == "Diapix")
##
## REML criterion at convergence: 40945.8
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.1123 -0.6992 -0.1036 0.5733 4.5351
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.00703 0.08384
## Residual 0.89518 0.94614
## Number of obs: 14988, groups: speaker, 36
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -5.459e-02 2.723e-02 2.438e+02 -2.005 0.0461 *
## turnOverall -2.519e-04 2.388e-04 7.881e+03 -1.055 0.2914
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## turnOverall -0.809
# Interaction-only model with the closeness score, Diapix task
summary(m1 <- lmer(f0maxz ~ turnOverall : closeness + (1|speaker), ipus |> filter(task=="Diapix")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0maxz ~ turnOverall:closeness + (1 | speaker)
## Data: filter(ipus, task == "Diapix")
##
## REML criterion at convergence: 40950.2
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.1100 -0.6996 -0.1028 0.5752 4.5187
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.006979 0.08354
## Residual 0.895256 0.94618
## Number of obs: 14988, groups: speaker, 36
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -8.204e-02 2.242e-02 7.994e+01 -3.660 0.000451 ***
## turnOverall:closeness 1.236e-05 4.614e-05 3.616e+02 0.268 0.788943
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:cl -0.703
# Interaction-only model with the similarity score, Diapix task
summary(m2 <- lmer(f0maxz ~ turnOverall : similarity + (1|speaker), ipus |> filter(task=="Diapix")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0maxz ~ turnOverall:similarity + (1 | speaker)
## Data: filter(ipus, task == "Diapix")
##
## REML criterion at convergence: 40948.8
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.1155 -0.6978 -0.1008 0.5729 4.5453
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.007156 0.08459
## Residual 0.895112 0.94610
## Number of obs: 14988, groups: speaker, 36
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -5.170e-02 2.523e-02 1.361e+02 -2.049 0.0424 *
## turnOverall:similarity -5.090e-05 3.780e-05 1.217e+03 -1.347 0.1783
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:sm -0.770
# Interaction-only model with the likeability score, Diapix task
summary(m3 <- lmer(f0maxz ~ turnOverall : likeability + (1|speaker), ipus |> filter(task=="Diapix")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0maxz ~ turnOverall:likeability + (1 | speaker)
## Data: filter(ipus, task == "Diapix")
##
## REML criterion at convergence: 40949.3
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.1143 -0.6980 -0.1026 0.5719 4.5401
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.007192 0.08481
## Residual 0.895119 0.94611
## Number of obs: 14988, groups: speaker, 36
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -5.190e-02 2.629e-02 1.814e+02 -1.974 0.0499 *
## turnOverall:likeability -4.162e-05 3.328e-05 3.132e+03 -1.251 0.2112
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:lk -0.790
# Interaction-only model with the become-friends score, Diapix task
summary(m4 <- lmer(f0maxz ~ turnOverall : becomeFriends + (1|speaker), ipus |> filter(task=="Diapix")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0maxz ~ turnOverall:becomeFriends + (1 | speaker)
## Data: filter(ipus, task == "Diapix")
##
## REML criterion at convergence: 40949.7
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.1131 -0.6987 -0.1022 0.5724 4.5333
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.007208 0.0849
## Residual 0.895149 0.9461
## Number of obs: 14988, groups: speaker, 36
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -5.834e-02 2.529e-02 1.322e+02 -2.307 0.0226 *
## turnOverall:becomeFriends -3.605e-05 3.596e-05 1.081e+03 -1.003 0.3163
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:bF -0.770
# LRT (ML refit): Diapix baseline vs. closeness interaction — equal npar, Df = 0
anova(m, m1)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Diapix")
## Models:
## m: f0maxz ~ turnOverall + (1 | speaker)
## m1: f0maxz ~ turnOverall:closeness + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 40933 40963 -20462 40925
## m1 4 40934 40964 -20463 40926 0 0
# LRT (ML refit): Diapix baseline vs. similarity interaction — equal npar, Df = 0
anova(m, m2)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Diapix")
## Models:
## m: f0maxz ~ turnOverall + (1 | speaker)
## m2: f0maxz ~ turnOverall:similarity + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 40933 40963 -20462 40925
## m2 4 40932 40962 -20462 40924 0.6856 0
# LRT (ML refit): Diapix baseline vs. likeability interaction — equal npar, Df = 0
anova(m, m3)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Diapix")
## Models:
## m: f0maxz ~ turnOverall + (1 | speaker)
## m3: f0maxz ~ turnOverall:likeability + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 40933 40963 -20462 40925
## m3 4 40932 40963 -20462 40924 0.4272 0
# LRT (ML refit): Diapix baseline vs. become-friends interaction — equal npar, Df = 0
anova(m, m4)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Diapix")
## Models:
## m: f0maxz ~ turnOverall + (1 | speaker)
## m4: f0maxz ~ turnOverall:becomeFriends + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 40933 40963 -20462 40925
## m4 4 40933 40963 -20462 40925 0 0
# f0 SD (z) against overall turn index across the whole experiment,
# one facet per condition with a per-facet linear fit
ggplot(ipus, aes(turnOverall, f0sdz))+
geom_point()+
geom_smooth(method="lm")+
facet_wrap(~condition)+
ggtitle("Entire experiment")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 1004 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 1004 rows containing missing values (`geom_point()`).
# Baseline: f0 SD (z) over turns, all data. The fit is singular — the speaker
# intercept variance is effectively zero (see output below).
summary(m <- lmer(f0sdz ~ turnOverall + (1|speaker), ipus))
## boundary (singular) fit: see help('isSingular')
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0sdz ~ turnOverall + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 52109.6
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.3061 -0.7011 -0.0974 0.6416 5.1757
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 9.064e-33 9.521e-17
## Residual 9.918e-01 9.959e-01
## Number of obs: 18408, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 1.264e-02 1.472e-02 1.841e+04 0.859 0.390
## turnOverall -1.639e-04 1.655e-04 1.841e+04 -0.991 0.322
##
## Correlation of Fixed Effects:
## (Intr)
## turnOverall -0.867
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# Turn index interacting with experimental condition (no main effects); also singular
summary(m1 <- lmer(f0sdz ~ turnOverall : condition + (1|speaker), ipus))
## boundary (singular) fit: see help('isSingular')
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0sdz ~ turnOverall:condition + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 52114.3
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.2757 -0.7035 -0.0942 0.6419 5.1616
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 9.059e-33 9.518e-17
## Residual 9.913e-01 9.956e-01
## Number of obs: 18408, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) 5.992e-03 1.485e-02 1.840e+04 0.403
## turnOverall:conditionclose 2.232e-04 2.030e-04 1.840e+04 1.100
## turnOverall:conditionimpersonal -3.322e-04 1.731e-04 1.840e+04 -1.919
## Pr(>|t|)
## (Intercept) 0.687
## turnOverall:conditionclose 0.271
## turnOverall:conditionimpersonal 0.055 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) trnOvrll:cndtnc
## trnOvrll:cndtnc -0.779
## trnOvrll:cndtnm -0.780 0.608
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# LRT (ML refit): condition adds one parameter (npar 5 vs 4);
# output below shows the interaction model is preferred (p < .001)
anova(m, m1)
## refitting model(s) with ML (instead of REML)
## Data: ipus
## Models:
## m: f0sdz ~ turnOverall + (1 | speaker)
## m1: f0sdz ~ turnOverall:condition + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 52094 52125 -26043 52086
## m1 5 52085 52124 -26038 52075 10.834 1 0.0009965 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Residual diagnostics for m in a 2x2 panel: histogram, Q-Q plot,
# and residuals vs. fitted values
par(mfrow=c(2, 2))
hist(resid(m))
qqnorm(resid(m));qqline(resid(m))
plot(fitted(m), resid(m))
# f0 SD (z) over turns for the Lists task only, faceted by condition
ggplot(ipus |> filter(task=="Lists"), aes(turnOverall, f0sdz))+
geom_point()+
geom_smooth(method="lm")+
facet_wrap(~condition)+
ggtitle("Question lists (1st part of experiment)")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 156 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 156 rows containing missing values (`geom_point()`).
# ggsave(paste0(here::here(), "/figures/forMeeting/sdCondition.png"))
# Baseline for the Lists task: f0 SD (z) over turns, random intercept per speaker
summary(m <- lmer(f0sdz ~ turnOverall + (1|speaker), ipus |> filter(task=="Lists")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0sdz ~ turnOverall + (1 | speaker)
## Data: filter(ipus, task == "Lists")
##
## REML criterion at convergence: 10427.9
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.6763 -0.6572 -0.0888 0.6277 3.6239
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.1777 0.4215
## Residual 0.8119 0.9010
## Number of obs: 3916, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 9.657e-02 7.238e-02 4.209e+01 1.334 0.189
## turnOverall -6.234e-03 1.178e-03 3.913e+03 -5.291 1.28e-07 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## turnOverall -0.249
# Turn index interacting with condition (no main effects), Lists task
summary(m1 <- lmer(f0sdz ~ turnOverall : condition + (1|speaker), ipus |> filter(task=="Lists")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0sdz ~ turnOverall:condition + (1 | speaker)
## Data: filter(ipus, task == "Lists")
##
## REML criterion at convergence: 10433.3
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.6347 -0.6463 -0.0878 0.6161 3.6816
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.1645 0.4055
## Residual 0.8116 0.9009
## Number of obs: 3916, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) 1.047e-01 7.003e-02 4.187e+01 1.496
## turnOverall:conditionclose -9.139e-03 1.759e-03 3.322e+03 -5.197
## turnOverall:conditionimpersonal -4.023e-03 1.538e-03 3.567e+03 -2.615
## Pr(>|t|)
## (Intercept) 0.14227
## turnOverall:conditionclose 2.15e-07 ***
## turnOverall:conditionimpersonal 0.00896 **
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) trnOvrll:cndtnc
## trnOvrll:cndtnc -0.212
## trnOvrll:cndtnm -0.163 0.035
# LRT (ML refit): condition adds one parameter; interaction model preferred (p = .026)
anova(m, m1)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Lists")
## Models:
## m: f0sdz ~ turnOverall + (1 | speaker)
## m1: f0sdz ~ turnOverall:condition + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 10421 10446 -5206.4 10413
## m1 5 10418 10449 -5203.9 10408 4.9395 1 0.02625 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Residual diagnostics for the condition-interaction model m1 (2x2 panel)
par(mfrow=c(2, 2))
hist(resid(m1))
qqnorm(resid(m1));qqline(resid(m1))
plot(fitted(m1), resid(m1))
# f0 SD (z) over turns for the Diapix task only, faceted by condition
ggplot(ipus |> filter(task=="Diapix"), aes(turnOverall, f0sdz))+
geom_point()+
geom_smooth(method="lm")+
facet_wrap(~condition)+
ggtitle("Diapix (2nd part of experiment)")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 848 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 848 rows containing missing values (`geom_point()`).
# Baseline for the Diapix task: f0 SD (z) over turns, random intercept per speaker
summary(m <- lmer(f0sdz ~ turnOverall + (1|speaker), ipus |> filter(task=="Diapix")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0sdz ~ turnOverall + (1 | speaker)
## Data: filter(ipus, task == "Diapix")
##
## REML criterion at convergence: 40934.7
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.3765 -0.7068 -0.0697 0.6413 5.1812
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.01269 0.1127
## Residual 0.98114 0.9905
## Number of obs: 14492, groups: speaker, 36
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.401e-02 3.125e-02 1.713e+02 2.048 0.04208 *
## turnOverall -6.827e-04 2.553e-04 1.036e+04 -2.674 0.00752 **
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## turnOverall -0.754
# Turn index interacting with condition (no main effects), Diapix task
summary(m1 <- lmer(f0sdz ~ turnOverall : condition + (1|speaker), ipus |> filter(task=="Diapix")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0sdz ~ turnOverall:condition + (1 | speaker)
## Data: filter(ipus, task == "Diapix")
##
## REML criterion at convergence: 40948.9
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.3765 -0.7064 -0.0693 0.6411 5.1811
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.01293 0.1137
## Residual 0.98117 0.9905
## Number of obs: 14492, groups: speaker, 36
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) 6.403e-02 3.138e-02 1.384e+02 2.040
## turnOverall:conditionclose -6.808e-04 3.226e-04 9.392e+02 -2.111
## turnOverall:conditionimpersonal -6.849e-04 2.924e-04 5.732e+02 -2.342
## Pr(>|t|)
## (Intercept) 0.0432 *
## turnOverall:conditionclose 0.0351 *
## turnOverall:conditionimpersonal 0.0195 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) trnOvrll:cndtnc
## trnOvrll:cndtnc -0.618
## trnOvrll:cndtnm -0.637 0.394
# LRT (ML refit): the condition interaction does not improve fit here (p = .92)
anova(m, m1)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Diapix")
## Models:
## m: f0sdz ~ turnOverall + (1 | speaker)
## m1: f0sdz ~ turnOverall:condition + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 40922 40952 -20457 40914
## m1 5 40924 40962 -20457 40914 0.0107 1 0.9176
# Residual diagnostics for the Diapix baseline model m (2x2 panel)
par(mfrow=c(2, 2))
hist(resid(m))
qqnorm(resid(m));qqline(resid(m))
plot(fitted(m), resid(m))
Checking whether f0 SD changes across time in interaction with the questionnaire scores: even though many of the interaction terms are significant, these models' AICs are not preferred over that of the model without the questionnaire scores.
# entire experiment
# Baseline f0 SD model refit for comparison against the questionnaire-score
# interaction models below (singular: near-zero speaker intercept variance)
summary(m <- lmer(f0sdz ~ turnOverall + (1|speaker), ipus))
## boundary (singular) fit: see help('isSingular')
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0sdz ~ turnOverall + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 52109.6
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.3061 -0.7011 -0.0974 0.6416 5.1757
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 9.064e-33 9.521e-17
## Residual 9.918e-01 9.959e-01
## Number of obs: 18408, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 1.264e-02 1.472e-02 1.841e+04 0.859 0.390
## turnOverall -1.639e-04 1.655e-04 1.841e+04 -0.991 0.322
##
## Correlation of Fixed Effects:
## (Intr)
## turnOverall -0.867
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# Interaction-only model with the closeness score, all data
summary(m1 <- lmer(f0sdz ~ turnOverall : closeness + (1|speaker), ipus))
## boundary (singular) fit: see help('isSingular')
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0sdz ~ turnOverall:closeness + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 52113.6
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.3174 -0.6999 -0.0974 0.6432 5.1780
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 9.065e-33 9.521e-17
## Residual 9.918e-01 9.959e-01
## Number of obs: 18408, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -5.575e-03 1.145e-02 1.841e+04 -0.487 0.626
## turnOverall:closeness 1.905e-05 3.005e-05 1.841e+04 0.634 0.526
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:cl -0.768
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# Interaction-only model with the similarity score, all data
summary(m2 <- lmer(f0sdz ~ turnOverall : similarity + (1|speaker), ipus))
## boundary (singular) fit: see help('isSingular')
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0sdz ~ turnOverall:similarity + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 52108.4
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.3083 -0.7027 -0.0975 0.6392 5.1813
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 9.062e-33 9.519e-17
## Residual 9.915e-01 9.958e-01
## Number of obs: 18408, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 2.679e-02 1.321e-02 1.841e+04 2.028 0.0425 *
## turnOverall:similarity -6.220e-05 2.550e-05 1.841e+04 -2.440 0.0147 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:sm -0.831
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# Interaction-only model with the likeability score, all data
summary(m3 <- lmer(f0sdz ~ turnOverall : likeability + (1|speaker), ipus))
## boundary (singular) fit: see help('isSingular')
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0sdz ~ turnOverall:likeability + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 52113.4
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.3124 -0.6993 -0.0968 0.6399 5.1774
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 9.064e-33 9.521e-17
## Residual 9.918e-01 9.959e-01
## Number of obs: 18408, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 1.281e-02 1.403e-02 1.841e+04 0.913 0.361
## turnOverall:likeability -2.454e-05 2.291e-05 1.841e+04 -1.071 0.284
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:lk -0.852
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# Interaction-only model with the become-friends score, all data
summary(m4 <- lmer(f0sdz ~ turnOverall : becomeFriends + (1|speaker), ipus))
## boundary (singular) fit: see help('isSingular')
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0sdz ~ turnOverall:becomeFriends + (1 | speaker)
## Data: ipus
##
## REML criterion at convergence: 52113.9
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.3126 -0.7006 -0.0975 0.6403 5.1770
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 9.064e-33 9.521e-17
## Residual 9.918e-01 9.959e-01
## Number of obs: 18408, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 7.953e-03 1.315e-02 1.841e+04 0.605 0.545
## turnOverall:becomeFriends -1.749e-05 2.400e-05 1.841e+04 -0.729 0.466
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:bF -0.830
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# LRT (ML refit): baseline vs. closeness interaction — equal npar, Df = 0
anova(m, m1)
## refitting model(s) with ML (instead of REML)
## Data: ipus
## Models:
## m: f0sdz ~ turnOverall + (1 | speaker)
## m1: f0sdz ~ turnOverall:closeness + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 52094 52125 -26043 52086
## m1 4 52095 52126 -26043 52087 0 0
# LRT (ML refit): baseline vs. similarity interaction — equal npar, Df = 0
anova(m, m2)
## refitting model(s) with ML (instead of REML)
## Data: ipus
## Models:
## m: f0sdz ~ turnOverall + (1 | speaker)
## m2: f0sdz ~ turnOverall:similarity + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 52094 52125 -26043 52086
## m2 4 52089 52120 -26041 52081 4.9691 0
# LRT (ML refit): baseline vs. likeability interaction — equal npar, Df = 0
anova(m, m3)
## refitting model(s) with ML (instead of REML)
## Data: ipus
## Models:
## m: f0sdz ~ turnOverall + (1 | speaker)
## m3: f0sdz ~ turnOverall:likeability + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 52094 52125 -26043 52086
## m3 4 52094 52125 -26043 52086 0.1662 0
# LRT (ML refit): baseline vs. become-friends interaction — equal npar, Df = 0
anova(m, m4)
## refitting model(s) with ML (instead of REML)
## Data: ipus
## Models:
## m: f0sdz ~ turnOverall + (1 | speaker)
## m4: f0sdz ~ turnOverall:becomeFriends + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 52094 52125 -26043 52086
## m4 4 52094 52126 -26043 52086 0 0
# lists
# Baseline for the Lists task, refit for the questionnaire-score comparisons below
summary(m <- lmer(f0sdz ~ turnOverall + (1|speaker), ipus |> filter(task=="Lists")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0sdz ~ turnOverall + (1 | speaker)
## Data: filter(ipus, task == "Lists")
##
## REML criterion at convergence: 10427.9
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.6763 -0.6572 -0.0888 0.6277 3.6239
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.1777 0.4215
## Residual 0.8119 0.9010
## Number of obs: 3916, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 9.657e-02 7.238e-02 4.209e+01 1.334 0.189
## turnOverall -6.234e-03 1.178e-03 3.913e+03 -5.291 1.28e-07 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## turnOverall -0.249
# Interaction-only model with the closeness score, Lists task
summary(m1 <- lmer(f0sdz ~ turnOverall : closeness + (1|speaker), ipus |> filter(task=="Lists")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0sdz ~ turnOverall:closeness + (1 | speaker)
## Data: filter(ipus, task == "Lists")
##
## REML criterion at convergence: 10447.9
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.6342 -0.6530 -0.0904 0.6175 3.5981
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.1733 0.4163
## Residual 0.8157 0.9031
## Number of obs: 3916, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 5.523e-02 7.122e-02 4.127e+01 0.775 0.44251
## turnOverall:closeness -9.427e-04 2.881e-04 3.788e+03 -3.273 0.00108 **
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:cl -0.232
# Interaction-only model with the similarity score, Lists task
summary(m2 <- lmer(f0sdz ~ turnOverall : similarity + (1|speaker), ipus |> filter(task=="Lists")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0sdz ~ turnOverall:similarity + (1 | speaker)
## Data: filter(ipus, task == "Lists")
##
## REML criterion at convergence: 10430.3
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.7049 -0.6482 -0.0863 0.6338 3.6281
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.1820 0.4266
## Residual 0.8115 0.9008
## Number of obs: 3916, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 9.553e-02 7.301e-02 4.161e+01 1.308 0.198
## turnOverall:similarity -1.090e-03 2.021e-04 3.888e+03 -5.395 7.26e-08 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:sm -0.240
# Interaction-only model with the likeability score, Lists task
summary(m3 <- lmer(f0sdz ~ turnOverall : likeability + (1|speaker), ipus |> filter(task=="Lists")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0sdz ~ turnOverall:likeability + (1 | speaker)
## Data: filter(ipus, task == "Lists")
##
## REML criterion at convergence: 10436.6
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.6656 -0.6535 -0.0748 0.6216 3.6164
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.1768 0.4205
## Residual 0.8129 0.9016
## Number of obs: 3916, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 8.803e-02 7.223e-02 4.212e+01 1.219 0.23
## turnOverall:likeability -8.289e-04 1.726e-04 3.905e+03 -4.803 1.62e-06 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:lk -0.250
# Interaction-only model with the become-friends score, Lists task
summary(m4 <- lmer(f0sdz ~ turnOverall : becomeFriends + (1|speaker), ipus |> filter(task=="Lists")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0sdz ~ turnOverall:becomeFriends + (1 | speaker)
## Data: filter(ipus, task == "Lists")
##
## REML criterion at convergence: 10431.1
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.6920 -0.6552 -0.0768 0.6144 3.6217
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.1764 0.420
## Residual 0.8119 0.901
## Number of obs: 3916, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 9.341e-02 7.197e-02 4.168e+01 1.298 0.201
## turnOverall:becomeFriends -1.006e-03 1.887e-04 3.889e+03 -5.329 1.04e-07 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:bF -0.241
# LRT (ML refit): Lists baseline vs. closeness interaction — equal npar, Df = 0
anova(m, m1)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Lists")
## Models:
## m: f0sdz ~ turnOverall + (1 | speaker)
## m1: f0sdz ~ turnOverall:closeness + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 10421 10446 -5206.4 10413
## m1 4 10438 10463 -5214.9 10430 0 0
# LRT (ML refit): Lists baseline vs. similarity interaction — equal npar, Df = 0
anova(m, m2)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Lists")
## Models:
## m: f0sdz ~ turnOverall + (1 | speaker)
## m2: f0sdz ~ turnOverall:similarity + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 10421 10446 -5206.4 10413
## m2 4 10420 10445 -5205.8 10412 1.0541 0
# LRT (ML refit): Lists baseline vs. likeability interaction — equal npar, Df = 0
anova(m, m3)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Lists")
## Models:
## m: f0sdz ~ turnOverall + (1 | speaker)
## m3: f0sdz ~ turnOverall:likeability + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 10421 10446 -5206.4 10413
## m3 4 10426 10451 -5208.8 10418 0 0
# LRT (ML refit): Lists baseline vs. become-friends interaction — equal npar, Df = 0
anova(m, m4)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Lists")
## Models:
## m: f0sdz ~ turnOverall + (1 | speaker)
## m4: f0sdz ~ turnOverall:becomeFriends + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 10421 10446 -5206.4 10413
## m4 4 10420 10445 -5206.2 10412 0.4093 0
# diapix
# Baseline for the Diapix section: does f0 SD change over time (turnOverall),
# with a per-speaker random intercept?
summary(m <- lmer(f0sdz ~ turnOverall + (1|speaker), ipus |> filter(task=="Diapix")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0sdz ~ turnOverall + (1 | speaker)
## Data: filter(ipus, task == "Diapix")
##
## REML criterion at convergence: 40934.7
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.3765 -0.7068 -0.0697 0.6413 5.1812
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.01269 0.1127
## Residual 0.98114 0.9905
## Number of obs: 14492, groups: speaker, 36
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.401e-02 3.125e-02 1.713e+02 2.048 0.04208 *
## turnOverall -6.827e-04 2.553e-04 1.036e+04 -2.674 0.00752 **
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## turnOverall -0.754
# Does self-reported closeness moderate the f0 SD slope over time (Diapix)?
summary(m1 <- lmer(f0sdz ~ turnOverall : closeness + (1|speaker), ipus |> filter(task=="Diapix")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0sdz ~ turnOverall:closeness + (1 | speaker)
## Data: filter(ipus, task == "Diapix")
##
## REML criterion at convergence: 40940.8
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.4000 -0.7074 -0.0682 0.6407 5.1852
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.01393 0.1180
## Residual 0.98115 0.9905
## Number of obs: 14492, groups: speaker, 36
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 3.808e-02 2.770e-02 7.245e+01 1.375 0.1735
## turnOverall:closeness -1.091e-04 5.186e-05 7.115e+02 -2.104 0.0357 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:cl -0.637
# Does self-reported similarity moderate the f0 SD slope over time (Diapix)?
summary(m2 <- lmer(f0sdz ~ turnOverall : similarity + (1|speaker), ipus |> filter(task=="Diapix")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0sdz ~ turnOverall:similarity + (1 | speaker)
## Data: filter(ipus, task == "Diapix")
##
## REML criterion at convergence: 40928.7
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.3818 -0.7086 -0.0712 0.6429 5.1714
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.01275 0.1129
## Residual 0.98047 0.9902
## Number of obs: 14492, groups: speaker, 36
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 8.786e-02 2.949e-02 1.160e+02 2.979 0.00352 **
## turnOverall:similarity -1.690e-04 4.113e-05 2.189e+03 -4.110 4.1e-05 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:sm -0.717
# Does self-reported likeability moderate the f0 SD slope over time (Diapix)?
summary(m3 <- lmer(f0sdz ~ turnOverall : likeability + (1|speaker), ipus |> filter(task=="Diapix")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0sdz ~ turnOverall:likeability + (1 | speaker)
## Data: filter(ipus, task == "Diapix")
##
## REML criterion at convergence: 40936
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.3852 -0.7065 -0.0678 0.6435 5.1781
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.01314 0.1146
## Residual 0.98089 0.9904
## Number of obs: 14492, groups: speaker, 36
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 7.133e-02 3.056e-02 1.385e+02 2.334 0.02104 *
## turnOverall:likeability -1.128e-04 3.586e-05 5.352e+03 -3.146 0.00166 **
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:lk -0.732
# Does the wish to become friends moderate the f0 SD slope over time (Diapix)?
summary(m4 <- lmer(f0sdz ~ turnOverall : becomeFriends + (1|speaker), ipus |> filter(task=="Diapix")))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: f0sdz ~ turnOverall:becomeFriends + (1 | speaker)
## Data: filter(ipus, task == "Diapix")
##
## REML criterion at convergence: 40937.5
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.3840 -0.7067 -0.0708 0.6432 5.1820
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.01354 0.1164
## Residual 0.98094 0.9904
## Number of obs: 14492, groups: speaker, 36
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.210e-02 2.996e-02 1.107e+02 2.073 0.0405 *
## turnOverall:becomeFriends -1.130e-04 3.931e-05 2.089e+03 -2.873 0.0041 **
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trnOvrll:bF -0.710
# Diapix: time-only baseline (m) vs. time x closeness interaction (m1).
# NOTE(review): both models have npar = 4 (non-nested), so the LRT Df is 0
# and only the AIC/BIC comparison in the output is interpretable.
anova(m, m1)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Diapix")
## Models:
## m: f0sdz ~ turnOverall + (1 | speaker)
## m1: f0sdz ~ turnOverall:closeness + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 40922 40952 -20457 40914
## m1 4 40925 40955 -20459 40917 0 0
# Diapix: time-only baseline (m) vs. time x similarity interaction (m2);
# non-nested (equal npar), so compare by AIC/BIC rather than the LRT.
anova(m, m2)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Diapix")
## Models:
## m: f0sdz ~ turnOverall + (1 | speaker)
## m2: f0sdz ~ turnOverall:similarity + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 40922 40952 -20457 40914
## m2 4 40912 40943 -20452 40904 9.7319 0
# Diapix: time-only baseline (m) vs. time x likeability interaction (m3);
# non-nested (equal npar), so compare by AIC/BIC rather than the LRT.
anova(m, m3)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Diapix")
## Models:
## m: f0sdz ~ turnOverall + (1 | speaker)
## m3: f0sdz ~ turnOverall:likeability + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 40922 40952 -20457 40914
## m3 4 40919 40950 -20456 40911 2.6889 0
# Diapix: time-only baseline (m) vs. time x becomeFriends interaction (m4);
# non-nested (equal npar), so compare by AIC/BIC rather than the LRT.
anova(m, m4)
## refitting model(s) with ML (instead of REML)
## Data: filter(ipus, task == "Diapix")
## Models:
## m: f0sdz ~ turnOverall + (1 | speaker)
## m4: f0sdz ~ turnOverall:becomeFriends + (1 | speaker)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m 4 40922 40952 -20457 40914
## m4 4 40921 40951 -20457 40913 0.978 0
## Test, per facial ROI and per f0 feature, whether the change in facial
## temperature moderates the change of f0 over time: compare a baseline model
## (f0 ~ turnOverall) against one adding a turnOverall x temperature-change
## interaction. The original code repeated the same stanza 9 times
## (3 features x 3 sections); it is factored into a helper here, preserving
## the fitting order and therefore the row order of `tos`.
tos <- data.frame(matrix(nrow=0, ncol=5))
names(tos) <- c("f0feature", "ROI", "section", "direction", "temperatureAffectsF0")

## Fit baseline and interaction models on `d` and return one row for `tos`:
## the sign of the interaction estimate (row 4 of the tidied fixed effects)
## and whether temperature "affects" f0, i.e. the interaction model improves
## AIC by at least 2 AND the interaction term is significant at alpha = .05.
## (The baseline model's tidy() output was computed but never used in the
## original; it is dropped here.)
tempEffectRow <- function(d, dv, tempVar, f0feature, roi, section){
  m <- lmer(as.formula(paste0(dv, " ~ turnOverall + (1|speaker)")), d)
  m1 <- lmer(as.formula(paste0(dv, " ~ turnOverall * ", tempVar, " + (1|speaker)")), d)
  c1 <- broom.mixed::tidy(m1)
  a <- anova(m, m1)
  c(f0feature, roi, section,
    ifelse(c1$estimate[4] < 0, "negative", "positive"),
    ifelse(a$AIC[2] - a$AIC[1] <= -2 & c1$p.value[4] < 0.05, "affects", "doesnt"))
}

for(r in unique(ipus$ROI)){
  for(f0feature in c("f0median", "f0max", "f0sd")){
    dv <- switch(f0feature, f0median = "f0medz", f0max = "f0maxz", f0sd = "f0sdz")
    # entire experiment
    tos[nrow(tos)+1,] <- tempEffectRow(
      ipus |> filter(!is.na(tempChangeEntireExp), ROI==r),
      dv, "tempChangeEntireExp", f0feature, r, "entireExp")
    # Lists section: skipped when no rows remain (missing data for this ROI)
    if(nrow(ipus |> filter(!is.na(tempChangeLists), ROI==r, task=="Lists")) > 0){
      tos[nrow(tos)+1,] <- tempEffectRow(
        ipus |> filter(!is.na(tempChangeLists), ROI==r, task=="Lists"),
        dv, "tempChangeLists", f0feature, r, "Lists")
    }
    # Diapix section
    tos[nrow(tos)+1,] <- tempEffectRow(
      ipus |> filter(!is.na(tempChangeDiapix), ROI==r, task=="Diapix"),
      dv, "tempChangeDiapix", f0feature, r, "Diapix")
  }
}
## boundary (singular) fit: see help('isSingular')
## boundary (singular) fit: see help('isSingular')
## refitting model(s) with ML (instead of REML)
## refitting model(s) with ML (instead of REML)
## boundary (singular) fit: see help('isSingular')
## boundary (singular) fit: see help('isSingular')
## refitting model(s) with ML (instead of REML)
## boundary (singular) fit: see help('isSingular')
## boundary (singular) fit: see help('isSingular')
## refitting model(s) with ML (instead of REML)
## refitting model(s) with ML (instead of REML)
## boundary (singular) fit: see help('isSingular')
## boundary (singular) fit: see help('isSingular')
## refitting model(s) with ML (instead of REML)
## boundary (singular) fit: see help('isSingular')
## boundary (singular) fit: see help('isSingular')
## refitting model(s) with ML (instead of REML)
## refitting model(s) with ML (instead of REML)
## refitting model(s) with ML (instead of REML)
## boundary (singular) fit: see help('isSingular')
## boundary (singular) fit: see help('isSingular')
## refitting model(s) with ML (instead of REML)
## refitting model(s) with ML (instead of REML)
## boundary (singular) fit: see help('isSingular')
## boundary (singular) fit: see help('isSingular')
## refitting model(s) with ML (instead of REML)
## Warning in optwrap(optimizer, devfun, x@theta, lower = x@lower, calc.derivs =
## TRUE, : convergence code 3 from bobyqa: bobyqa -- a trust region step failed to
## reduce q
## boundary (singular) fit: see help('isSingular')
## boundary (singular) fit: see help('isSingular')
## refitting model(s) with ML (instead of REML)
## refitting model(s) with ML (instead of REML)
## boundary (singular) fit: see help('isSingular')
## boundary (singular) fit: see help('isSingular')
## refitting model(s) with ML (instead of REML)
## boundary (singular) fit: see help('isSingular')
## boundary (singular) fit: see help('isSingular')
## refitting model(s) with ML (instead of REML)
## refitting model(s) with ML (instead of REML)
## boundary (singular) fit: see help('isSingular')
## refitting model(s) with ML (instead of REML)
## boundary (singular) fit: see help('isSingular')
## boundary (singular) fit: see help('isSingular')
## refitting model(s) with ML (instead of REML)
## refitting model(s) with ML (instead of REML)
## boundary (singular) fit: see help('isSingular')
## boundary (singular) fit: see help('isSingular')
## refitting model(s) with ML (instead of REML)
## boundary (singular) fit: see help('isSingular')
## boundary (singular) fit: see help('isSingular')
## refitting model(s) with ML (instead of REML)
## refitting model(s) with ML (instead of REML)
## boundary (singular) fit: see help('isSingular')
## boundary (singular) fit: see help('isSingular')
## refitting model(s) with ML (instead of REML)
## boundary (singular) fit: see help('isSingular')
## boundary (singular) fit: see help('isSingular')
## refitting model(s) with ML (instead of REML)
## refitting model(s) with ML (instead of REML)
## refitting model(s) with ML (instead of REML)
## boundary (singular) fit: see help('isSingular')
## boundary (singular) fit: see help('isSingular')
## refitting model(s) with ML (instead of REML)
## refitting model(s) with ML (instead of REML)
## boundary (singular) fit: see help('isSingular')
## boundary (singular) fit: see help('isSingular')
## refitting model(s) with ML (instead of REML)
## boundary (singular) fit: see help('isSingular')
## boundary (singular) fit: see help('isSingular')
## refitting model(s) with ML (instead of REML)
## refitting model(s) with ML (instead of REML)
## boundary (singular) fit: see help('isSingular')
## boundary (singular) fit: see help('isSingular')
## refitting model(s) with ML (instead of REML)
## Warning in optwrap(optimizer, devfun, x@theta, lower = x@lower, calc.derivs =
## TRUE, : convergence code 3 from bobyqa: bobyqa -- a trust region step failed to
## reduce q
## boundary (singular) fit: see help('isSingular')
## boundary (singular) fit: see help('isSingular')
## refitting model(s) with ML (instead of REML)
## refitting model(s) with ML (instead of REML)
## refitting model(s) with ML (instead of REML)
# Report only the ROI x feature combinations where adding the temperature
# interaction both improved AIC (by >= 2) and was itself significant.
print("These are the significant effects of temperature on f0 (per ROI and f0 feature):")
## [1] "These are the significant effects of temperature on f0 (per ROI and f0 feature):"
print(tos |> filter(temperatureAffectsF0=="affects") |> arrange(section))
## f0feature ROI section direction temperatureAffectsF0
## 1 f0sd Eyes entireExp negative affects
## 2 f0median Nose entireExp negative affects
## 3 f0median Cheeks entireExp negative affects
## 4 f0max Cheeks entireExp negative affects
## 5 f0sd Cheeks entireExp negative affects
We now measure f0 entrainment by correlating, over time, each speaker’s f0 difference from their partner and comparing it against the same correlations computed on randomly ordered (“mock”) turn series (following Levitan & Hirschberg 2011’s method).
# Load the saved speech data (the objects used below, e.g. `dat`) and
# initialise the empty accumulator `all`, which collects one summary row per
# speaker x f0 feature x experiment section.
load(file.path(here::here(), "data", "speechData.RData"))
all <- setNames(
  data.frame(matrix(nrow = 0, ncol = 9)),
  c("speaker", "type", "direction", "coefficient", "mock", "real",
    "condition", "feature", "section")
)
# Per-speaker scatter of the partner f0-median difference over time with a
# linear fit: a negative slope suggests convergence, a positive one divergence.
ggplot(dat, aes(turnOverall, f0medDiff))+
geom_point()+
geom_smooth(method="lm")+
facet_wrap(~speaker)
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 430 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 430 rows containing missing values (`geom_point()`).
## f0 median entrainment over the entire experiment: per speaker, correlate
## the real f0-median difference from the partner with time (turnOverall),
## and compare against 10 "mock" correlations computed on randomly ordered
## differences.
corS <- data.frame(matrix(nrow=0, ncol=4))
names(corS) <- c("speaker", "type", "r", "p")
for(s in unique(dat$speaker)){
  d <- dat |> filter(speaker==s)
  # real (chronological) series; `ct` avoids shadowing base::c
  ct <- cor.test(d$f0medDiff, d$turnOverall,
                 alternative="two.sided", method="pearson")
  corS[nrow(corS)+1,] <- c(s, "real", ct$estimate, ct$p.value)
  # 10 mock series with shuffled turn order
  for(i in 1:10){
    ct <- cor.test(d[[paste0("mockMedDiff", i)]], d$turnOverall,
                   alternative="two.sided", method="pearson")
    corS[nrow(corS)+1,] <- c(s, "mock", ct$estimate, ct$p.value)
  }
}
# Row-wise insertion via c() coerced every column to character. Convert r and
# p back to numeric before comparing: otherwise `p < 0.05` is a string
# comparison, which silently misclassifies p-values rendered in scientific
# notation (e.g. "1e-07" < 0.05 is FALSE when compared as strings).
corS <- corS |>
  mutate(r = as.numeric(r),
         p = as.numeric(p),
         sign = ifelse(p < 0.05, "*", NA),
         direction = case_when(
           r < 0 ~ "convergence",   # partner difference shrinks over time
           r > 0 ~ "divergence"))
# One row per speaker: direction and coefficient of the real correlation, the
# number of significant mock correlations, and whether the real one was
# significant.
# NOTE(review): the "type" column is the constant "convergence" for every
# speaker -- presumably a leftover; confirm it is not used downstream.
count <- data.frame(matrix(nrow=0, ncol=6))
names(count) <- c("speaker", "type", "direction", "coefficient", "mock", "real")
for(s in unique(corS$speaker)){
  count[nrow(count)+1,] <- c(s,
                             "convergence",
                             corS$direction[corS$speaker==s & corS$type=="real"],
                             corS$r[corS$speaker==s & corS$type=="real"],
                             sum(!is.na(corS$sign[corS$speaker==s & corS$type=="mock"])),
                             ifelse(corS$sign[corS$speaker==s & corS$type=="real"]=="*", "yes", "no"))
}
count <- merge(count, dat |> select(condition, speaker) |> filter(!duplicated(speaker)), by = "speaker")
# A speaker only counts as truly entrained ("yes") when at most 1 of the 10
# mock correlations was significant; otherwise the real result may be chance.
count <- count |>
  mutate(mock = as.numeric(mock)) |>
  arrange(mock) |>
  mutate(feature = "f0median",
         section = "entireExp",
         real = case_when(
           mock > 1 | is.na(real) ~ "no",
           .default = as.character(real)
         ))
all <- rbind(all, count)
Below we see the only 3 speakers whose correlation was significant between time and real f0 median differences AND who had no more than 1 significant correlation between time and randomly ordered f0 median.
One converged and two diverged on f0 median. All from the impersonal condition.
# Full per-speaker table, sorted by number of significant mock correlations.
print(count)
## speaker type direction coefficient mock real condition
## 1 HAG-A convergence divergence 0.0585051544624926 0 no close
## 2 HBR-A convergence convergence -0.0251212645486692 0 no impersonal
## 3 HUJ-B convergence convergence -0.00750409423012982 0 no close
## 4 KPB-A convergence convergence -0.0354923347870457 0 no close
## 5 KPB-B convergence divergence 0.0191921570743888 0 no close
## 6 NLO-B convergence divergence 0.0345706069155182 0 no close
## 7 OAL-B convergence convergence -0.124834077506019 0 no impersonal
## 8 OXQ-B convergence divergence 0.166665816388928 0 yes impersonal
## 9 QRT-B convergence convergence -0.0836923992008052 0 no close
## 10 SGB-A convergence convergence -0.0219420233127176 0 no impersonal
## 11 SUK-A convergence convergence -0.0393606102048184 0 no impersonal
## 12 SUK-B convergence convergence -0.131940400166196 0 no impersonal
## 13 TTN-B convergence convergence -0.101531413290794 0 no close
## 14 TTY-A convergence divergence 0.0580479706097723 0 no close
## 15 VDE-B convergence divergence 0.00622333715956697 0 no impersonal
## 16 FWR-A convergence divergence 0.19142983203941 1 no close
## 17 HBR-B convergence divergence 0.189733700626366 1 yes impersonal
## 18 KDA-B convergence divergence 0.0999034595944067 1 no impersonal
## 19 OXQ-A convergence convergence -0.0666762187913525 1 no impersonal
## 20 QRT-A convergence convergence -0.154659734620579 1 no close
## 21 SGB-B convergence convergence -0.163349472489718 1 no impersonal
## 22 TTN-A convergence divergence 0.0854815432161278 1 no close
## 23 TTY-B convergence convergence -0.105368664223288 1 no close
## 24 ZNV-B convergence divergence 0.0457135841426055 1 no close
## 25 FWR-B convergence convergence -0.448622846844869 2 no close
## 26 FXO-B convergence convergence -0.337329882306175 2 no impersonal
## 27 KDA-A convergence divergence 0.0491004567553633 2 no impersonal
## 28 HAG-B convergence convergence -0.243303988703851 3 no close
## 29 HUJ-A convergence divergence 0.22341521128362 3 no close
## 30 OAL-A convergence convergence -0.122336636013981 3 no impersonal
## 31 AML-B convergence convergence -0.264839601839081 5 no impersonal
## 32 MJG-B convergence divergence 0.254146876437062 5 no close
## 33 NLO-A convergence divergence 0.251454148243944 5 no close
## 34 ZNV-A convergence convergence -0.191693549932247 5 no close
## 35 MJG-A convergence divergence 0.315877637701108 6 no close
## 36 FXO-A convergence convergence -0.371862250906732 7 no impersonal
## 37 AML-A convergence convergence -0.0530817180998742 8 no impersonal
## 38 VDE-A convergence divergence 0.14295996583943 10 no impersonal
## feature section
## 1 f0median entireExp
## 2 f0median entireExp
## 3 f0median entireExp
## 4 f0median entireExp
## 5 f0median entireExp
## 6 f0median entireExp
## 7 f0median entireExp
## 8 f0median entireExp
## 9 f0median entireExp
## 10 f0median entireExp
## 11 f0median entireExp
## 12 f0median entireExp
## 13 f0median entireExp
## 14 f0median entireExp
## 15 f0median entireExp
## 16 f0median entireExp
## 17 f0median entireExp
## 18 f0median entireExp
## 19 f0median entireExp
## 20 f0median entireExp
## 21 f0median entireExp
## 22 f0median entireExp
## 23 f0median entireExp
## 24 f0median entireExp
## 25 f0median entireExp
## 26 f0median entireExp
## 27 f0median entireExp
## 28 f0median entireExp
## 29 f0median entireExp
## 30 f0median entireExp
## 31 f0median entireExp
## 32 f0median entireExp
## 33 f0median entireExp
## 34 f0median entireExp
## 35 f0median entireExp
## 36 f0median entireExp
## 37 f0median entireExp
## 38 f0median entireExp
Since we could argue that the speakers converge to each other only in the beginning, and that in the second part of the experiment the difference between f0s doesn’t keep decreasing (or increasing), we repeat the analysis restricted to the Lists task (the first part of the experiment).
# Same per-speaker scatter + linear fit, restricted to the Lists section.
ggplot(dat |> filter(task=="Lists"), aes(turnOverall, f0medDiff))+
geom_point()+
geom_smooth(method="lm")+
facet_wrap(~speaker)
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 133 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 133 rows containing missing values (`geom_point()`).
## f0 median entrainment in the Lists section only: same real-vs-mock
## correlation procedure as for the entire experiment.
corS <- data.frame(matrix(nrow=0, ncol=4))
names(corS) <- c("speaker", "type", "r", "p")
for(s in unique(dat$speaker)){
  d <- dat |> filter(speaker==s, task=="Lists")
  # real (chronological) series; `ct` avoids shadowing base::c
  ct <- cor.test(d$f0medDiff, d$turnOverall,
                 alternative="two.sided", method="pearson")
  corS[nrow(corS)+1,] <- c(s, "real", ct$estimate, ct$p.value)
  # 10 mock series with shuffled turn order
  for(i in 1:10){
    ct <- cor.test(d[[paste0("mockMedDiff", i)]], d$turnOverall,
                   alternative="two.sided", method="pearson")
    corS[nrow(corS)+1,] <- c(s, "mock", ct$estimate, ct$p.value)
  }
}
# Row-wise insertion via c() coerced every column to character. Convert r and
# p back to numeric before comparing: otherwise `p < 0.05` is a string
# comparison, which silently misclassifies p-values rendered in scientific
# notation (e.g. "1e-07" < 0.05 is FALSE when compared as strings).
corS <- corS |>
  mutate(r = as.numeric(r),
         p = as.numeric(p),
         sign = ifelse(p < 0.05, "*", NA),
         direction = case_when(
           r < 0 ~ "convergence",   # partner difference shrinks over time
           r > 0 ~ "divergence"))
# One row per speaker: direction and coefficient of the real correlation, the
# number of significant mock correlations, and whether the real one was
# significant.
# NOTE(review): the "type" column is the constant "convergence" for every
# speaker -- presumably a leftover; confirm it is not used downstream.
count <- data.frame(matrix(nrow=0, ncol=6))
names(count) <- c("speaker", "type", "direction", "coefficient", "mock", "real")
for(s in unique(corS$speaker)){
  count[nrow(count)+1,] <- c(s,
                             "convergence",
                             corS$direction[corS$speaker==s & corS$type=="real"],
                             corS$r[corS$speaker==s & corS$type=="real"],
                             sum(!is.na(corS$sign[corS$speaker==s & corS$type=="mock"])),
                             ifelse(corS$sign[corS$speaker==s & corS$type=="real"]=="*", "yes", "no"))
}
count <- merge(count, dat |> select(condition, speaker) |> filter(!duplicated(speaker)), by = "speaker")
# A speaker only counts as truly entrained ("yes") when at most 1 of the 10
# mock correlations was significant; otherwise the real result may be chance.
count <- count |>
  mutate(mock = as.numeric(mock)) |>
  arrange(mock) |>
  mutate(feature = "f0median",
         section = "Lists",
         real = case_when(
           mock > 1 | is.na(real) ~ "no",
           .default = as.character(real)
         ))
all <- rbind(all, count)
Below we see the only 2 speakers whose correlation was significant between time and real f0 median differences AND who had no more than 1 significant correlation between time and randomly ordered f0 median.
One converged (in the close condition) and one diverged (impersonal condition) on f0 median. These 2 are different from the 3 displayed above.
# Full per-speaker table for the Lists section, sorted by mock count.
print(count)
## speaker type direction coefficient mock real condition
## 1 AML-A convergence divergence 0.143000566597349 0 no impersonal
## 2 FXO-A convergence divergence 0.107264785824985 0 no impersonal
## 3 FXO-B convergence divergence 0.154868071811506 0 no impersonal
## 4 HAG-B convergence divergence 0.147333240890105 0 no close
## 5 HBR-A convergence convergence -0.0538049458431317 0 no impersonal
## 6 HUJ-B convergence divergence 0.0250877420306285 0 no close
## 7 KDA-A convergence convergence -0.0714128403969355 0 no impersonal
## 8 KDA-B convergence divergence 0.0665630238984155 0 no impersonal
## 9 KPB-A convergence divergence 0.455765691195585 0 no close
## 10 MJG-A convergence convergence -0.0763107248583225 0 no close
## 11 MJG-B convergence divergence 0.161956129858464 0 no close
## 12 NLO-A convergence convergence -0.231340378328207 0 no close
## 13 OAL-A convergence divergence 0.203285282194069 0 no impersonal
## 14 OAL-B convergence convergence -0.106492016692283 0 no impersonal
## 15 OXQ-A convergence convergence -0.125378327692747 0 no impersonal
## 16 TTN-B convergence convergence -0.163205163334084 0 no close
## 17 TTY-B convergence divergence 0.100533526945519 0 no close
## 18 VDE-A convergence divergence 0.0556163450077026 0 no impersonal
## 19 ZNV-B convergence divergence 0.0561102812902435 0 no close
## 20 AML-B convergence convergence -0.289261443218117 1 no impersonal
## 21 FWR-A convergence divergence 0.19142983203941 1 no close
## 22 HAG-A convergence convergence -0.00197330763268993 1 no close
## 23 KPB-B convergence convergence -0.348824035411028 1 no close
## 24 NLO-B convergence convergence -0.215678209395366 1 no close
## 25 OXQ-B convergence divergence 0.181828667052807 1 no impersonal
## 26 QRT-A convergence divergence 0.192868891385498 1 no close
## 27 QRT-B convergence convergence -0.253776399953452 1 no close
## 28 SGB-B convergence convergence -0.0311039296351395 1 no impersonal
## 29 TTY-A convergence convergence -0.130624378856547 1 no close
## 30 ZNV-A convergence convergence -0.144478973054806 1 no close
## 31 FWR-B convergence convergence -0.448622846844869 2 no close
## 32 HUJ-A convergence divergence 0.500424842133659 2 no close
## 33 SUK-A convergence divergence 0.258140370359425 2 no impersonal
## 34 VDE-B convergence convergence -0.436894436925545 2 no impersonal
## 35 SUK-B convergence divergence 0.290701759954789 3 no impersonal
## 36 TTN-A convergence divergence 0.369258867205108 3 no close
## 37 HBR-B convergence divergence 0.0183314479061065 4 no impersonal
## 38 SGB-A convergence divergence 0.325654821030138 6 no impersonal
## feature section
## 1 f0median Lists
## 2 f0median Lists
## 3 f0median Lists
## 4 f0median Lists
## 5 f0median Lists
## 6 f0median Lists
## 7 f0median Lists
## 8 f0median Lists
## 9 f0median Lists
## 10 f0median Lists
## 11 f0median Lists
## 12 f0median Lists
## 13 f0median Lists
## 14 f0median Lists
## 15 f0median Lists
## 16 f0median Lists
## 17 f0median Lists
## 18 f0median Lists
## 19 f0median Lists
## 20 f0median Lists
## 21 f0median Lists
## 22 f0median Lists
## 23 f0median Lists
## 24 f0median Lists
## 25 f0median Lists
## 26 f0median Lists
## 27 f0median Lists
## 28 f0median Lists
## 29 f0median Lists
## 30 f0median Lists
## 31 f0median Lists
## 32 f0median Lists
## 33 f0median Lists
## 34 f0median Lists
## 35 f0median Lists
## 36 f0median Lists
## 37 f0median Lists
## 38 f0median Lists
Since we could argue that the speakers converge to each other more in the second part of the experiment, after they already got to know each other (and like each other or not) in the first part, we repeat the analysis restricted to the Diapix task (the second part of the experiment).
# Same per-speaker scatter + linear fit, restricted to the Diapix section.
ggplot(dat |> filter(task=="Diapix"), aes(turnOverall, f0medDiff))+
geom_point()+
geom_smooth(method="lm")+
facet_wrap(~speaker)
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 297 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 297 rows containing missing values (`geom_point()`).
## f0 median entrainment in the Diapix section only: same real-vs-mock
## correlation procedure as for the entire experiment.
corS <- data.frame(matrix(nrow=0, ncol=4))
names(corS) <- c("speaker", "type", "r", "p")
for(s in unique(dat$speaker)){
  d <- dat |> filter(speaker==s, task=="Diapix")
  if(nrow(d) == 0){next} # because we don't have the Diapix files for one dyad
  # real (chronological) series; `ct` avoids shadowing base::c
  ct <- cor.test(d$f0medDiff, d$turnOverall,
                 alternative="two.sided", method="pearson")
  corS[nrow(corS)+1,] <- c(s, "real", ct$estimate, ct$p.value)
  # 10 mock series with shuffled turn order
  for(i in 1:10){
    ct <- cor.test(d[[paste0("mockMedDiff", i)]], d$turnOverall,
                   alternative="two.sided", method="pearson")
    corS[nrow(corS)+1,] <- c(s, "mock", ct$estimate, ct$p.value)
  }
}
# Row-wise insertion via c() coerced every column to character. Convert r and
# p back to numeric before comparing: otherwise `p < 0.05` is a string
# comparison, which silently misclassifies p-values rendered in scientific
# notation (e.g. "1e-07" < 0.05 is FALSE when compared as strings).
corS <- corS |>
  mutate(r = as.numeric(r),
         p = as.numeric(p),
         sign = ifelse(p < 0.05, "*", NA),
         direction = case_when(
           r < 0 ~ "convergence",   # partner difference shrinks over time
           r > 0 ~ "divergence"))
# One row per speaker: direction and coefficient of the real correlation, the
# number of significant mock correlations, and whether the real one was
# significant.
# NOTE(review): the "type" column is the constant "convergence" for every
# speaker -- presumably a leftover; confirm it is not used downstream.
count <- data.frame(matrix(nrow=0, ncol=6))
names(count) <- c("speaker", "type", "direction", "coefficient", "mock", "real")
for(s in unique(corS$speaker)){
  count[nrow(count)+1,] <- c(s,
                             "convergence",
                             corS$direction[corS$speaker==s & corS$type=="real"],
                             corS$r[corS$speaker==s & corS$type=="real"],
                             sum(!is.na(corS$sign[corS$speaker==s & corS$type=="mock"])),
                             ifelse(corS$sign[corS$speaker==s & corS$type=="real"]=="*", "yes", "no"))
}
count <- merge(count, dat |> select(condition, speaker) |> filter(!duplicated(speaker)), by = "speaker")
# A speaker only counts as truly entrained ("yes") when at most 1 of the 10
# mock correlations was significant; otherwise the real result may be chance.
count <- count |>
  mutate(mock = as.numeric(mock)) |>
  arrange(mock) |>
  mutate(feature = "f0median",
         section = "Diapix",
         real = case_when(
           mock > 1 | is.na(real) ~ "no",
           .default = as.character(real)
         ))
all <- rbind(all, count)
Below we see the only speaker whose correlation was significant between time and real f0 median differences AND who had no more than 1 significant correlation between time and randomly ordered f0 median.
She was in the close condition and converged to her partner.
# Full per-speaker table for the Diapix section, sorted by mock count.
print(count)
## speaker type direction coefficient mock real condition
## 1 AML-A convergence convergence -0.116165929038044 0 no impersonal
## 2 FXO-B convergence convergence -0.00255870671399689 0 no impersonal
## 3 HBR-A convergence divergence 0.00496831405175861 0 no impersonal
## 4 KPB-A convergence convergence -0.0304236372880813 0 no close
## 5 MJG-B convergence divergence 0.179855964890547 0 no close
## 6 NLO-B convergence convergence -0.0632223762383439 0 no close
## 7 OAL-A convergence convergence -0.0568871709273923 0 no impersonal
## 8 OAL-B convergence divergence 0.0895022962542955 0 no impersonal
## 9 OXQ-A convergence divergence 0.0477440405746218 0 no impersonal
## 10 OXQ-B convergence divergence 0.069221210908606 0 no impersonal
## 11 QRT-B convergence convergence -0.105297705492937 0 no close
## 12 SGB-A convergence divergence 0.137721634682491 0 no impersonal
## 13 SGB-B convergence divergence 0.0492801672329371 0 no impersonal
## 14 SUK-A convergence convergence -0.0110708338044713 0 no impersonal
## 15 SUK-B convergence convergence -0.125985004560869 0 no impersonal
## 16 TTY-B convergence convergence -0.220872291596236 0 yes close
## 17 VDE-B convergence convergence -0.0254190662169023 0 no impersonal
## 18 AML-B convergence convergence -0.0827303128312628 1 no impersonal
## 19 FXO-A convergence convergence -0.043178214859707 1 no impersonal
## 20 HAG-A convergence convergence -0.139158951489129 1 no close
## 21 KDA-A convergence convergence -0.00326332941577587 1 no impersonal
## 22 KDA-B convergence divergence 0.000126012247539085 1 no impersonal
## 23 KPB-B convergence divergence 0.0427356209487656 1 no close
## 24 NLO-A convergence divergence 0.108265693725868 1 no close
## 25 TTN-A convergence divergence 0.0716855006099233 1 no close
## 26 TTN-B convergence convergence -0.124610176761358 1 no close
## 27 ZNV-A convergence divergence 0.127753701807482 1 no close
## 28 HBR-B convergence divergence 0.0681380809942917 2 no impersonal
## 29 QRT-A convergence convergence -0.0121899842183128 2 no close
## 30 VDE-A convergence divergence 0.0108995823759431 2 no impersonal
## 31 HAG-B convergence convergence -0.169148789632483 4 no close
## 32 HUJ-A convergence divergence 0.143208325542347 4 no close
## 33 HUJ-B convergence divergence 0.215219155138621 6 no close
## 34 TTY-A convergence convergence -0.155938155105248 6 no close
## 35 ZNV-B convergence divergence 0.0878094623727449 6 no close
## 36 MJG-A convergence divergence 0.215852904124754 10 no close
## feature section
## 1 f0median Diapix
## 2 f0median Diapix
## 3 f0median Diapix
## 4 f0median Diapix
## 5 f0median Diapix
## 6 f0median Diapix
## 7 f0median Diapix
## 8 f0median Diapix
## 9 f0median Diapix
## 10 f0median Diapix
## 11 f0median Diapix
## 12 f0median Diapix
## 13 f0median Diapix
## 14 f0median Diapix
## 15 f0median Diapix
## 16 f0median Diapix
## 17 f0median Diapix
## 18 f0median Diapix
## 19 f0median Diapix
## 20 f0median Diapix
## 21 f0median Diapix
## 22 f0median Diapix
## 23 f0median Diapix
## 24 f0median Diapix
## 25 f0median Diapix
## 26 f0median Diapix
## 27 f0median Diapix
## 28 f0median Diapix
## 29 f0median Diapix
## 30 f0median Diapix
## 31 f0median Diapix
## 32 f0median Diapix
## 33 f0median Diapix
## 34 f0median Diapix
## 35 f0median Diapix
## 36 f0median Diapix
# Per-speaker scatter of the partner f0-max difference across the whole
# experiment, with a fitted linear trend line (one facet per speaker).
ggplot(dat, aes(x = turnOverall, y = f0maxDiff)) +
  geom_point() +
  geom_smooth(method = "lm") +
  facet_wrap(~speaker)
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 288 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 288 rows containing missing values (`geom_point()`).
# Per-speaker Pearson correlations between the real f0-max difference and
# time (overall turn number), plus 10 "mock" correlations computed on
# randomly ordered differences, which act as a permutation-style baseline.
corS <- data.frame(matrix(nrow = 0, ncol = 4))
names(corS) <- c("speaker", "type", "r", "p")
for (s in unique(dat$speaker)) {
  d <- dat |> filter(speaker == s)
  # `ct` instead of `c` so the result does not shadow base::c()
  ct <- cor.test(d$f0maxDiff, d$turnOverall,
                 alternative = "two.sided", method = "pearson")
  corS[nrow(corS) + 1, ] <- c(s, "real", ct$estimate, ct$p.value)
  for (i in 1:10) {
    ct <- cor.test(d[, paste0("mockMaxDiff", i)], d$turnOverall,
                   alternative = "two.sided", method = "pearson")
    corS[nrow(corS) + 1, ] <- c(s, "mock", ct$estimate, ct$p.value)
  }
}
# Flag significant correlations; negative r = the f0 gap shrinks over time
# (convergence), positive r = it grows (divergence).
corS <- corS |>
  mutate(sign = ifelse(p < 0.05, "*", NA),
         direction = case_when(
           r < 0 ~ "convergence",
           r > 0 ~ "divergence"))
# One row per speaker: direction/coefficient of the real correlation, how
# many of the 10 mock correlations were significant, and whether the real
# one was.
count <- data.frame(matrix(nrow = 0, ncol = 6))
names(count) <- c("speaker", "type", "direction", "coefficient", "mock", "real")
for (s in unique(corS$speaker)) {
  real_row <- corS$speaker == s & corS$type == "real"
  mock_rows <- corS$speaker == s & corS$type == "mock"
  count[nrow(count) + 1, ] <- c(s,
                                "convergence",
                                corS$direction[real_row],
                                corS$r[real_row],
                                # was sum(!is.na(x) == TRUE); the "== TRUE"
                                # was redundant (and a precedence trap)
                                sum(!is.na(corS$sign[mock_rows])),
                                # NA when not significant; resolved below
                                ifelse(corS$sign[real_row] == "*", "yes", "no"))
}
count <- merge(count, dat |> select(condition, speaker) |> filter(!duplicated(speaker)), by = "speaker")
count <- count |>
  mutate(mock = as.numeric(mock)) |>  # mutate_at() is superseded
  arrange(mock) |>
  mutate(feature = "f0max",
         section = "entireExp",
         # a result only counts as real convergence/divergence if at most 1
         # mock correlation was significant and the real one was
         real = case_when(
           mock > 1 | is.na(real) ~ "no",
           .default = as.character(real)
         ))
all <- rbind(all, count)
No one seemed to converge or diverge on f0 max.
print(count)
## speaker type direction coefficient mock real condition
## 1 FWR-A convergence divergence 0.195053915574296 0 no close
## 2 HAG-A convergence divergence 0.0435974260241883 0 no close
## 3 HBR-A convergence divergence 0.0852160213942602 0 no impersonal
## 4 HUJ-A convergence divergence 0.0264816729442473 0 no close
## 5 HUJ-B convergence convergence -0.0088900092639663 0 no close
## 6 KDA-A convergence convergence -0.0141080708683843 0 no impersonal
## 7 KDA-B convergence divergence 0.0298363008427167 0 no impersonal
## 8 KPB-B convergence convergence -0.0185706119535337 0 no close
## 9 MJG-A convergence convergence -0.0523415243973334 0 no close
## 10 NLO-B convergence divergence 0.0837320474903288 0 no close
## 11 QRT-A convergence divergence 0.011549552899041 0 no close
## 12 SGB-B convergence convergence -0.0842949435657642 0 no impersonal
## 13 SUK-A convergence divergence 0.0523540536536774 0 no impersonal
## 14 SUK-B convergence convergence -0.125408861712307 0 no impersonal
## 15 TTY-B convergence convergence -0.125693469234968 0 no close
## 16 VDE-B convergence divergence 0.0729246516544962 0 no impersonal
## 17 AML-B convergence divergence 0.103808291542023 1 no impersonal
## 18 FWR-B convergence convergence -0.134951173385048 1 no close
## 19 HBR-B convergence divergence 0.0621035498940902 1 no impersonal
## 20 MJG-B convergence convergence -0.0651902486689506 1 no close
## 21 OXQ-A convergence convergence -0.0847541361802634 1 no impersonal
## 22 QRT-B convergence divergence 0.0997800705740454 1 no close
## 23 TTN-A convergence convergence -0.0251091331120037 1 no close
## 24 AML-A convergence divergence 0.128806264299608 2 no impersonal
## 25 FXO-A convergence convergence -0.130250361429113 2 no impersonal
## 26 HAG-B convergence convergence -0.00264982355773621 2 no close
## 27 KPB-A convergence divergence 0.174598112103041 2 no close
## 28 OAL-A convergence convergence -0.118887343437888 2 no impersonal
## 29 OXQ-B convergence convergence -0.0866413306656346 2 no impersonal
## 30 TTN-B convergence convergence -0.0930170531150306 2 no close
## 31 TTY-A convergence divergence 0.0941140076253447 2 no close
## 32 ZNV-B convergence convergence -0.141102143040279 2 no close
## 33 NLO-A convergence convergence -0.0262142453422706 3 no close
## 34 SGB-A convergence divergence 0.0709269411109755 3 no impersonal
## 35 VDE-A convergence divergence 0.250965956882606 5 no impersonal
## 36 OAL-B convergence convergence -0.166686346228455 6 no impersonal
## 37 ZNV-A convergence convergence -0.280203241209376 6 no close
## 38 FXO-B convergence convergence -0.0551698498902035 7 no impersonal
## feature section
## 1 f0max entireExp
## 2 f0max entireExp
## 3 f0max entireExp
## 4 f0max entireExp
## 5 f0max entireExp
## 6 f0max entireExp
## 7 f0max entireExp
## 8 f0max entireExp
## 9 f0max entireExp
## 10 f0max entireExp
## 11 f0max entireExp
## 12 f0max entireExp
## 13 f0max entireExp
## 14 f0max entireExp
## 15 f0max entireExp
## 16 f0max entireExp
## 17 f0max entireExp
## 18 f0max entireExp
## 19 f0max entireExp
## 20 f0max entireExp
## 21 f0max entireExp
## 22 f0max entireExp
## 23 f0max entireExp
## 24 f0max entireExp
## 25 f0max entireExp
## 26 f0max entireExp
## 27 f0max entireExp
## 28 f0max entireExp
## 29 f0max entireExp
## 30 f0max entireExp
## 31 f0max entireExp
## 32 f0max entireExp
## 33 f0max entireExp
## 34 f0max entireExp
## 35 f0max entireExp
## 36 f0max entireExp
## 37 f0max entireExp
## 38 f0max entireExp
Since we could argue that the speakers converge to each other only in the beginning, and that in the second part of the experiment the difference between f0s doesn’t keep decreasing (or increasing), we also run the same analysis on the first part of the experiment (the Lists task) separately.
# Same f0-max difference plot, restricted to the Lists task (first part
# of the experiment).
ggplot(dat |> filter(task == "Lists"), aes(x = turnOverall, y = f0maxDiff)) +
  geom_point() +
  geom_smooth(method = "lm") +
  facet_wrap(~speaker)
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 97 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 97 rows containing missing values (`geom_point()`).
# Per-speaker correlations between f0-max difference and time, restricted
# to the Lists task, with 10 mock (randomly ordered) baselines per speaker.
corS <- data.frame(matrix(nrow = 0, ncol = 4))
names(corS) <- c("speaker", "type", "r", "p")
for (s in unique(dat$speaker)) {
  d <- dat |> filter(speaker == s, task == "Lists")
  # `ct` instead of `c` so the result does not shadow base::c()
  ct <- cor.test(d$f0maxDiff, d$turnOverall,
                 alternative = "two.sided", method = "pearson")
  corS[nrow(corS) + 1, ] <- c(s, "real", ct$estimate, ct$p.value)
  for (i in 1:10) {
    ct <- cor.test(d[, paste0("mockMaxDiff", i)], d$turnOverall,
                   alternative = "two.sided", method = "pearson")
    corS[nrow(corS) + 1, ] <- c(s, "mock", ct$estimate, ct$p.value)
  }
}
# Negative r = convergence (gap shrinks), positive r = divergence.
corS <- corS |>
  mutate(sign = ifelse(p < 0.05, "*", NA),
         direction = case_when(
           r < 0 ~ "convergence",
           r > 0 ~ "divergence"))
# Summarize per speaker: real direction/coefficient, number of significant
# mock correlations, and whether the real correlation was significant.
count <- data.frame(matrix(nrow = 0, ncol = 6))
names(count) <- c("speaker", "type", "direction", "coefficient", "mock", "real")
for (s in unique(corS$speaker)) {
  real_row <- corS$speaker == s & corS$type == "real"
  mock_rows <- corS$speaker == s & corS$type == "mock"
  count[nrow(count) + 1, ] <- c(s,
                                "convergence",
                                corS$direction[real_row],
                                corS$r[real_row],
                                # was sum(!is.na(x) == TRUE); "== TRUE" was
                                # redundant (and a precedence trap)
                                sum(!is.na(corS$sign[mock_rows])),
                                # NA when not significant; resolved below
                                ifelse(corS$sign[real_row] == "*", "yes", "no"))
}
count <- merge(count, dat |> select(condition, speaker) |> filter(!duplicated(speaker)), by = "speaker")
count <- count |>
  mutate(mock = as.numeric(mock)) |>  # mutate_at() is superseded
  arrange(mock) |>
  mutate(feature = "f0max",
         section = "Lists",
         # only counts as real if at most 1 mock correlation was
         # significant and the real one was
         real = case_when(
           mock > 1 | is.na(real) ~ "no",
           .default = as.character(real)
         ))
all <- rbind(all, count)
Below we see the only 2 speakers whose correlation was significant between time and real f0 max differences AND who had no more than 1 significant correlation between time and randomly ordered f0 max differences.
One converged (in the close condition) and one diverged (impersonal condition) on f0 max.
print(count)
## speaker type direction coefficient mock real condition
## 1 AML-A convergence convergence -0.0626303610955421 0 no impersonal
## 2 AML-B convergence convergence -0.137472448004817 0 no impersonal
## 3 FWR-A convergence divergence 0.195053915574296 0 no close
## 4 HAG-A convergence convergence -0.124051226935211 0 no close
## 5 KDA-A convergence divergence 0.0889225609819929 0 no impersonal
## 6 NLO-A convergence convergence -0.132572372228093 0 no close
## 7 NLO-B convergence convergence -0.443954182939341 0 yes close
## 8 OAL-A convergence convergence -0.162915287248881 0 no impersonal
## 9 OXQ-B convergence divergence 0.104661144667901 0 no impersonal
## 10 QRT-A convergence divergence 0.147478402879362 0 no close
## 11 QRT-B convergence divergence 0.125567025914835 0 no close
## 12 SGB-A convergence convergence -0.0839756904741547 0 no impersonal
## 13 SUK-B convergence divergence 0.291699724936196 0 no impersonal
## 14 TTY-B convergence divergence 0.16839895147975 0 no close
## 15 VDE-A convergence convergence -0.0631385635184396 0 no impersonal
## 16 VDE-B convergence convergence -0.276028480202935 0 yes impersonal
## 17 ZNV-B convergence divergence 0.0736673095224291 0 no close
## 18 FWR-B convergence convergence -0.134951173385048 1 no close
## 19 HAG-B convergence convergence -0.189245055548581 1 no close
## 20 KPB-B convergence divergence 0.503872552561909 1 no close
## 21 MJG-A convergence convergence -0.187920194544154 1 no close
## 22 MJG-B convergence convergence -0.18280788432291 1 no close
## 23 OXQ-A convergence convergence -0.085880364487863 1 no impersonal
## 24 SUK-A convergence divergence 0.0631106131160197 1 no impersonal
## 25 TTN-A convergence convergence -0.0786115569458072 1 no close
## 26 HBR-A convergence convergence -0.0641652582689635 2 no impersonal
## 27 HUJ-A convergence divergence 0.137300342658815 2 no close
## 28 KDA-B convergence divergence 0.0236434627523229 2 no impersonal
## 29 OAL-B convergence convergence -0.11120762451653 2 no impersonal
## 30 TTN-B convergence divergence 0.244375632518684 2 no close
## 31 TTY-A convergence divergence 0.18222840071088 2 no close
## 32 ZNV-A convergence divergence 0.0353426933221924 2 no close
## 33 FXO-B convergence convergence -0.197644259218367 3 no impersonal
## 34 KPB-A convergence divergence 0.561528733407216 3 no close
## 35 SGB-B convergence convergence -0.0211870198904643 3 no impersonal
## 36 HBR-B convergence convergence -0.304817762472401 4 no impersonal
## 37 FXO-A convergence divergence 0.0514575241315975 5 no impersonal
## 38 HUJ-B convergence divergence 0.229943553107705 8 no close
## feature section
## 1 f0max Lists
## 2 f0max Lists
## 3 f0max Lists
## 4 f0max Lists
## 5 f0max Lists
## 6 f0max Lists
## 7 f0max Lists
## 8 f0max Lists
## 9 f0max Lists
## 10 f0max Lists
## 11 f0max Lists
## 12 f0max Lists
## 13 f0max Lists
## 14 f0max Lists
## 15 f0max Lists
## 16 f0max Lists
## 17 f0max Lists
## 18 f0max Lists
## 19 f0max Lists
## 20 f0max Lists
## 21 f0max Lists
## 22 f0max Lists
## 23 f0max Lists
## 24 f0max Lists
## 25 f0max Lists
## 26 f0max Lists
## 27 f0max Lists
## 28 f0max Lists
## 29 f0max Lists
## 30 f0max Lists
## 31 f0max Lists
## 32 f0max Lists
## 33 f0max Lists
## 34 f0max Lists
## 35 f0max Lists
## 36 f0max Lists
## 37 f0max Lists
## 38 f0max Lists
Since we could argue that the speakers converge to each other more in the second part of the experiment, after they already got to know each other (and like each other or not) in the first part, we also run the same analysis on the second part (the Diapix task) separately.
# Same f0-max difference plot, restricted to the Diapix task (second part
# of the experiment).
ggplot(dat |> filter(task == "Diapix"), aes(x = turnOverall, y = f0maxDiff)) +
  geom_point() +
  geom_smooth(method = "lm") +
  facet_wrap(~speaker)
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 191 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 191 rows containing missing values (`geom_point()`).
# Per-speaker correlations between f0-max difference and time within the
# Diapix task only, with 10 mock (randomly ordered) baselines per speaker.
corS <- data.frame(matrix(nrow = 0, ncol = 4))
names(corS) <- c("speaker", "type", "r", "p")
for (s in unique(dat$speaker)) {
  d <- dat |> filter(speaker == s, task == "Diapix")
  # skip speakers with no Diapix data (files missing for one dyad)
  if (nrow(d) == 0) next
  # `ct` instead of `c` so the result does not shadow base::c()
  ct <- cor.test(d$f0maxDiff, d$turnOverall,
                 alternative = "two.sided", method = "pearson")
  corS[nrow(corS) + 1, ] <- c(s, "real", ct$estimate, ct$p.value)
  for (i in 1:10) {
    ct <- cor.test(d[, paste0("mockMaxDiff", i)], d$turnOverall,
                   alternative = "two.sided", method = "pearson")
    corS[nrow(corS) + 1, ] <- c(s, "mock", ct$estimate, ct$p.value)
  }
}
# Negative r = convergence (gap shrinks), positive r = divergence.
corS <- corS |>
  mutate(sign = ifelse(p < 0.05, "*", NA),
         direction = case_when(
           r < 0 ~ "convergence",
           r > 0 ~ "divergence"))
# Summarize per speaker: real direction/coefficient, number of significant
# mock correlations, and whether the real correlation was significant.
count <- data.frame(matrix(nrow = 0, ncol = 6))
names(count) <- c("speaker", "type", "direction", "coefficient", "mock", "real")
for (s in unique(corS$speaker)) {
  real_row <- corS$speaker == s & corS$type == "real"
  mock_rows <- corS$speaker == s & corS$type == "mock"
  count[nrow(count) + 1, ] <- c(s,
                                "convergence",
                                corS$direction[real_row],
                                corS$r[real_row],
                                # was sum(!is.na(x) == TRUE); "== TRUE" was
                                # redundant (and a precedence trap)
                                sum(!is.na(corS$sign[mock_rows])),
                                # NA when not significant; resolved below
                                ifelse(corS$sign[real_row] == "*", "yes", "no"))
}
count <- merge(count, dat |> select(condition, speaker) |> filter(!duplicated(speaker)), by = "speaker")
count <- count |>
  mutate(mock = as.numeric(mock)) |>  # mutate_at() is superseded
  arrange(mock) |>
  mutate(feature = "f0max",
         section = "Diapix",
         # only counts as real if at most 1 mock correlation was
         # significant and the real one was
         real = case_when(
           mock > 1 | is.na(real) ~ "no",
           .default = as.character(real)
         ))
all <- rbind(all, count)
Below we see the only 2 speakers whose correlation was significant between time and real f0 max differences AND who had no more than 1 significant correlation between time and randomly ordered f0 max differences.
One diverged and one converged on f0 max. Both were in the impersonal condition.
print(count)
## speaker type direction coefficient mock real condition
## 1 FXO-B convergence divergence 0.0974122952796964 0 no impersonal
## 2 HAG-A convergence divergence 0.0325481436955988 0 no close
## 3 HBR-A convergence divergence 0.215356401552828 0 no impersonal
## 4 KDA-A convergence convergence -0.195059068504469 0 no impersonal
## 5 KDA-B convergence convergence -0.170630867010071 0 no impersonal
## 6 KPB-B convergence divergence 0.0704765917566594 0 no close
## 7 MJG-B convergence convergence -0.116372109132848 0 no close
## 8 NLO-B convergence divergence 0.0460927768827829 0 no close
## 9 OXQ-B convergence convergence -0.0133341764805421 0 no impersonal
## 10 QRT-A convergence divergence 0.0779197276208909 0 no close
## 11 QRT-B convergence divergence 0.102040437968312 0 no close
## 12 SGB-A convergence divergence 0.0270359347073647 0 no impersonal
## 13 SUK-B convergence convergence -0.148783229602822 0 no impersonal
## 14 TTN-A convergence convergence -0.163852741608531 0 no close
## 15 TTY-B convergence convergence -0.147841922211197 0 no close
## 16 VDE-A convergence divergence 0.153119485091093 0 no impersonal
## 17 ZNV-A convergence divergence 0.0536471338734851 0 no close
## 18 ZNV-B convergence convergence -0.00864303754645299 0 no close
## 19 AML-A convergence divergence 0.146730552290373 1 no impersonal
## 20 FXO-A convergence convergence -0.0176680668070729 1 no impersonal
## 21 HAG-B convergence convergence -0.0580246050309827 1 no close
## 22 KPB-A convergence divergence 0.117358733371286 1 no close
## 23 MJG-A convergence convergence -0.145981119344687 1 no close
## 24 NLO-A convergence divergence 0.0165806333009691 1 no close
## 25 SGB-B convergence convergence -0.17679358314786 1 no impersonal
## 26 VDE-B convergence divergence 0.122539500552027 1 no impersonal
## 27 HUJ-A convergence divergence 0.11804686302089 2 no close
## 28 OAL-A convergence convergence -0.238229191413814 2 no impersonal
## 29 SUK-A convergence divergence 0.0571407238100443 2 no impersonal
## 30 AML-B convergence divergence 0.0985094506287954 3 no impersonal
## 31 HBR-B convergence divergence 0.220937824398609 3 no impersonal
## 32 HUJ-B convergence divergence 0.104582246445074 3 no close
## 33 OAL-B convergence convergence -0.167341105719336 3 no impersonal
## 34 TTN-B convergence convergence -0.148078253160875 3 no close
## 35 OXQ-A convergence divergence 0.0216457593161531 4 no impersonal
## 36 TTY-A convergence divergence 0.0150163535059419 5 no close
## feature section
## 1 f0max Diapix
## 2 f0max Diapix
## 3 f0max Diapix
## 4 f0max Diapix
## 5 f0max Diapix
## 6 f0max Diapix
## 7 f0max Diapix
## 8 f0max Diapix
## 9 f0max Diapix
## 10 f0max Diapix
## 11 f0max Diapix
## 12 f0max Diapix
## 13 f0max Diapix
## 14 f0max Diapix
## 15 f0max Diapix
## 16 f0max Diapix
## 17 f0max Diapix
## 18 f0max Diapix
## 19 f0max Diapix
## 20 f0max Diapix
## 21 f0max Diapix
## 22 f0max Diapix
## 23 f0max Diapix
## 24 f0max Diapix
## 25 f0max Diapix
## 26 f0max Diapix
## 27 f0max Diapix
## 28 f0max Diapix
## 29 f0max Diapix
## 30 f0max Diapix
## 31 f0max Diapix
## 32 f0max Diapix
## 33 f0max Diapix
## 34 f0max Diapix
## 35 f0max Diapix
## 36 f0max Diapix
# Per-speaker scatter of the partner f0-SD difference across the whole
# experiment, with a fitted linear trend line.
ggplot(dat, aes(x = turnOverall, y = f0sdDiff)) +
  geom_point() +
  geom_smooth(method = "lm") +
  facet_wrap(~speaker)
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 575 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 575 rows containing missing values (`geom_point()`).
# Per-speaker Pearson correlations between the real f0-SD difference and
# time (overall turn number), plus 10 mock correlations on randomly
# ordered differences as a permutation-style baseline.
corS <- data.frame(matrix(nrow = 0, ncol = 4))
names(corS) <- c("speaker", "type", "r", "p")
for (s in unique(dat$speaker)) {
  d <- dat |> filter(speaker == s)
  # `ct` instead of `c` so the result does not shadow base::c()
  ct <- cor.test(d$f0sdDiff, d$turnOverall,
                 alternative = "two.sided", method = "pearson")
  corS[nrow(corS) + 1, ] <- c(s, "real", ct$estimate, ct$p.value)
  for (i in 1:10) {
    ct <- cor.test(d[, paste0("mockSdDiff", i)], d$turnOverall,
                   alternative = "two.sided", method = "pearson")
    corS[nrow(corS) + 1, ] <- c(s, "mock", ct$estimate, ct$p.value)
  }
}
# Negative r = convergence (gap shrinks), positive r = divergence.
corS <- corS |>
  mutate(sign = ifelse(p < 0.05, "*", NA),
         direction = case_when(
           r < 0 ~ "convergence",
           r > 0 ~ "divergence"))
# Summarize per speaker: real direction/coefficient, number of significant
# mock correlations, and whether the real correlation was significant.
count <- data.frame(matrix(nrow = 0, ncol = 6))
names(count) <- c("speaker", "type", "direction", "coefficient", "mock", "real")
for (s in unique(corS$speaker)) {
  real_row <- corS$speaker == s & corS$type == "real"
  mock_rows <- corS$speaker == s & corS$type == "mock"
  count[nrow(count) + 1, ] <- c(s,
                                "convergence",
                                corS$direction[real_row],
                                corS$r[real_row],
                                # was sum(!is.na(x) == TRUE); "== TRUE" was
                                # redundant (and a precedence trap)
                                sum(!is.na(corS$sign[mock_rows])),
                                # NA when not significant; resolved below
                                ifelse(corS$sign[real_row] == "*", "yes", "no"))
}
count <- merge(count, dat |> select(condition, speaker) |> filter(!duplicated(speaker)), by = "speaker")
count <- count |>
  mutate(mock = as.numeric(mock)) |>  # mutate_at() is superseded
  arrange(mock) |>
  mutate(feature = "f0sd",
         section = "entireExp",
         # only counts as real if at most 1 mock correlation was
         # significant and the real one was
         real = case_when(
           mock > 1 | is.na(real) ~ "no",
           .default = as.character(real)
         ))
all <- rbind(all, count)
No one seemed to converge or diverge on f0 sd.
print(count)
## speaker type direction coefficient mock real condition
## 1 AML-A convergence divergence 0.0214347198164549 0 no impersonal
## 2 FWR-A convergence divergence 0.381410831855009 0 no close
## 3 HAG-A convergence convergence -0.111388711938931 0 no close
## 4 HBR-A convergence divergence 0.11055116508764 0 no impersonal
## 5 HUJ-A convergence divergence 0.112514218269034 0 no close
## 6 MJG-A convergence divergence 0.108364718505304 0 no close
## 7 OAL-A convergence convergence -0.298741868255242 0 yes impersonal
## 8 OXQ-A convergence divergence 0.0883732843677492 0 no impersonal
## 9 OXQ-B convergence divergence 0.129968075164485 0 no impersonal
## 10 SGB-B convergence convergence -0.017808362944551 0 no impersonal
## 11 SUK-B convergence divergence 0.0815035913537748 0 no impersonal
## 12 TTN-A convergence divergence 0.0305506349296566 0 no close
## 13 TTN-B convergence convergence -0.0310049868726606 0 no close
## 14 ZNV-B convergence convergence -0.201336693011536 0 yes close
## 15 AML-B convergence convergence -0.000225128971050298 1 no impersonal
## 16 FWR-B convergence divergence 0.0507325591874094 1 no close
## 17 HAG-B convergence convergence -0.0199481020245841 1 no close
## 18 HBR-B convergence convergence -0.0904705999350023 1 no impersonal
## 19 HUJ-B convergence divergence 0.156278049333046 1 no close
## 20 SGB-A convergence divergence 0.0110069881251993 1 no impersonal
## 21 SUK-A convergence divergence 0.0145140989267818 1 no impersonal
## 22 TTY-A convergence convergence -0.0459941236150303 1 no close
## 23 TTY-B convergence convergence -0.115572975791875 1 no close
## 24 FXO-B convergence convergence -0.0176871120807929 2 no impersonal
## 25 KDA-A convergence convergence -0.0612453321167161 2 no impersonal
## 26 KDA-B convergence divergence 0.139130627674164 2 no impersonal
## 27 KPB-B convergence divergence 0.0670924000700283 2 no close
## 28 NLO-B convergence divergence 0.134312383035443 2 no close
## 29 ZNV-A convergence convergence -0.217826307627929 2 no close
## 30 FXO-A convergence convergence -0.254255544069265 3 no impersonal
## 31 KPB-A convergence divergence 0.206230871992645 3 no close
## 32 MJG-B convergence divergence 0.142863154421377 3 no close
## 33 VDE-A convergence divergence 0.29847223421496 3 no impersonal
## 34 NLO-A convergence divergence 0.0546105591199303 4 no close
## 35 QRT-B convergence divergence 0.251210464706423 4 no close
## 36 QRT-A convergence divergence 0.0991090120655462 5 no close
## 37 OAL-B convergence convergence -0.197777896513274 7 no impersonal
## 38 VDE-B convergence divergence 0.262492005058125 8 no impersonal
## feature section
## 1 f0sd entireExp
## 2 f0sd entireExp
## 3 f0sd entireExp
## 4 f0sd entireExp
## 5 f0sd entireExp
## 6 f0sd entireExp
## 7 f0sd entireExp
## 8 f0sd entireExp
## 9 f0sd entireExp
## 10 f0sd entireExp
## 11 f0sd entireExp
## 12 f0sd entireExp
## 13 f0sd entireExp
## 14 f0sd entireExp
## 15 f0sd entireExp
## 16 f0sd entireExp
## 17 f0sd entireExp
## 18 f0sd entireExp
## 19 f0sd entireExp
## 20 f0sd entireExp
## 21 f0sd entireExp
## 22 f0sd entireExp
## 23 f0sd entireExp
## 24 f0sd entireExp
## 25 f0sd entireExp
## 26 f0sd entireExp
## 27 f0sd entireExp
## 28 f0sd entireExp
## 29 f0sd entireExp
## 30 f0sd entireExp
## 31 f0sd entireExp
## 32 f0sd entireExp
## 33 f0sd entireExp
## 34 f0sd entireExp
## 35 f0sd entireExp
## 36 f0sd entireExp
## 37 f0sd entireExp
## 38 f0sd entireExp
Since we could argue that the speakers converge to each other only in the beginning, and that in the second part of the experiment the difference between f0s doesn’t keep decreasing (or increasing), we also run the same analysis on the first part of the experiment (the Lists task) separately.
# Same f0-SD difference plot, restricted to the Lists task (first part
# of the experiment).
ggplot(dat |> filter(task == "Lists"), aes(x = turnOverall, y = f0sdDiff)) +
  geom_point() +
  geom_smooth(method = "lm") +
  facet_wrap(~speaker)
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 152 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 152 rows containing missing values (`geom_point()`).
# Per-speaker correlations between f0-SD difference and time, restricted
# to the Lists task, with 10 mock (randomly ordered) baselines per speaker.
corS <- data.frame(matrix(nrow = 0, ncol = 4))
names(corS) <- c("speaker", "type", "r", "p")
for (s in unique(dat$speaker)) {
  d <- dat |> filter(speaker == s, task == "Lists")
  # `ct` instead of `c` so the result does not shadow base::c()
  ct <- cor.test(d$f0sdDiff, d$turnOverall,
                 alternative = "two.sided", method = "pearson")
  corS[nrow(corS) + 1, ] <- c(s, "real", ct$estimate, ct$p.value)
  for (i in 1:10) {
    ct <- cor.test(d[, paste0("mockSdDiff", i)], d$turnOverall,
                   alternative = "two.sided", method = "pearson")
    corS[nrow(corS) + 1, ] <- c(s, "mock", ct$estimate, ct$p.value)
  }
}
# Negative r = convergence (gap shrinks), positive r = divergence.
corS <- corS |>
  mutate(sign = ifelse(p < 0.05, "*", NA),
         direction = case_when(
           r < 0 ~ "convergence",
           r > 0 ~ "divergence"))
# Summarize per speaker: real direction/coefficient, number of significant
# mock correlations, and whether the real correlation was significant.
count <- data.frame(matrix(nrow = 0, ncol = 6))
names(count) <- c("speaker", "type", "direction", "coefficient", "mock", "real")
for (s in unique(corS$speaker)) {
  real_row <- corS$speaker == s & corS$type == "real"
  mock_rows <- corS$speaker == s & corS$type == "mock"
  count[nrow(count) + 1, ] <- c(s,
                                "convergence",
                                corS$direction[real_row],
                                corS$r[real_row],
                                # was sum(!is.na(x) == TRUE); "== TRUE" was
                                # redundant (and a precedence trap)
                                sum(!is.na(corS$sign[mock_rows])),
                                # NA when not significant; resolved below
                                ifelse(corS$sign[real_row] == "*", "yes", "no"))
}
count <- merge(count, dat |> select(condition, speaker) |> filter(!duplicated(speaker)), by = "speaker")
count <- count |>
  mutate(mock = as.numeric(mock)) |>  # mutate_at() is superseded
  arrange(mock) |>
  mutate(feature = "f0sd",
         section = "Lists",
         # only counts as real if at most 1 mock correlation was
         # significant and the real one was
         real = case_when(
           mock > 1 | is.na(real) ~ "no",
           .default = as.character(real)
         ))
all <- rbind(all, count)
Below we see the only 2 speakers whose correlation was significant between time and real f0 sd differences AND who had no more than 1 significant correlation between time and randomly ordered f0 sd differences.
One converged (in the impersonal condition) and one diverged (close condition) on f0 sd.
print(count)
## speaker type direction coefficient mock real condition
## 1 FWR-A convergence divergence 0.381410831855009 0 no close
## 2 FXO-A convergence divergence 0.0121891760211383 0 no impersonal
## 3 HAG-A convergence convergence -0.00757213709685002 0 no close
## 4 HAG-B convergence divergence 0.0187532193945991 0 no close
## 5 HBR-A convergence divergence 0.302663055942697 0 no impersonal
## 6 HBR-B convergence convergence -0.155623451845258 0 no impersonal
## 7 HUJ-A convergence divergence 0.240509690797476 0 no close
## 8 KDA-B convergence convergence -0.00978828650430149 0 no impersonal
## 9 KPB-B convergence divergence 0.145399904108498 0 no close
## 10 MJG-B convergence convergence -0.356490052874218 0 no close
## 11 NLO-A convergence convergence -0.108842531567252 0 no close
## 12 OAL-B convergence convergence -0.0734388442832312 0 no impersonal
## 13 OXQ-A convergence convergence -0.117064541273057 0 no impersonal
## 14 SGB-A convergence divergence 0.0808076859175227 0 no impersonal
## 15 TTN-A convergence convergence -0.0846889361583049 0 no close
## 16 TTN-B convergence divergence 0.0334423909758696 0 no close
## 17 VDE-A convergence convergence -0.31549012844638 0 yes impersonal
## 18 ZNV-A convergence convergence -0.211092977165392 0 no close
## 19 ZNV-B convergence convergence -0.143139035328182 0 no close
## 20 FWR-B convergence divergence 0.0507325591874094 1 no close
## 21 KDA-A convergence convergence -0.0896216568251073 1 no impersonal
## 22 KPB-A convergence divergence 0.408712196058522 1 no close
## 23 NLO-B convergence divergence 0.102060759285047 1 no close
## 24 SGB-B convergence divergence 0.438803836732088 1 yes impersonal
## 25 SUK-A convergence divergence 0.134359079209268 1 no impersonal
## 26 SUK-B convergence divergence 0.136570853070235 1 no impersonal
## 27 TTY-A convergence convergence -0.180984245316223 1 no close
## 28 TTY-B convergence divergence 0.358199677461476 1 no close
## 29 VDE-B convergence convergence -0.164884973273884 1 no impersonal
## 30 FXO-B convergence convergence -0.101221268984188 2 no impersonal
## 31 OAL-A convergence convergence -0.195543304328618 2 no impersonal
## 32 OXQ-B convergence divergence 0.0980841393836394 2 no impersonal
## 33 AML-A convergence divergence 0.210803292805898 3 no impersonal
## 34 MJG-A convergence convergence -0.331569089539631 3 no close
## 35 QRT-B convergence divergence 0.0511840573862018 3 no close
## 36 QRT-A convergence divergence 0.301527018343838 4 no close
## 37 HUJ-B convergence divergence 0.332677030997308 6 no close
## 38 AML-B convergence convergence -0.00841782023317876 8 no impersonal
## feature section
## 1 f0sd Lists
## 2 f0sd Lists
## 3 f0sd Lists
## 4 f0sd Lists
## 5 f0sd Lists
## 6 f0sd Lists
## 7 f0sd Lists
## 8 f0sd Lists
## 9 f0sd Lists
## 10 f0sd Lists
## 11 f0sd Lists
## 12 f0sd Lists
## 13 f0sd Lists
## 14 f0sd Lists
## 15 f0sd Lists
## 16 f0sd Lists
## 17 f0sd Lists
## 18 f0sd Lists
## 19 f0sd Lists
## 20 f0sd Lists
## 21 f0sd Lists
## 22 f0sd Lists
## 23 f0sd Lists
## 24 f0sd Lists
## 25 f0sd Lists
## 26 f0sd Lists
## 27 f0sd Lists
## 28 f0sd Lists
## 29 f0sd Lists
## 30 f0sd Lists
## 31 f0sd Lists
## 32 f0sd Lists
## 33 f0sd Lists
## 34 f0sd Lists
## 35 f0sd Lists
## 36 f0sd Lists
## 37 f0sd Lists
## 38 f0sd Lists
Since we could argue that the speakers converge to each other more in the second part of the experiment, after they already got to know each other (and like each other or not) in the first part, we also run the same analysis on the second part (the Diapix task) separately.
# Same f0-SD difference plot, restricted to the Diapix task (second part
# of the experiment).
ggplot(dat |> filter(task == "Diapix"), aes(x = turnOverall, y = f0sdDiff)) +
  geom_point() +
  geom_smooth(method = "lm") +
  facet_wrap(~speaker)
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 423 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 423 rows containing missing values (`geom_point()`).
# Per-speaker correlations between f0-SD difference and time within the
# Diapix task only, with 10 mock (randomly ordered) baselines per speaker.
corS <- data.frame(matrix(nrow = 0, ncol = 4))
names(corS) <- c("speaker", "type", "r", "p")
for (s in unique(dat$speaker)) {
  d <- dat |> filter(speaker == s, task == "Diapix")
  # skip speakers with no Diapix data (files missing for one dyad)
  if (nrow(d) == 0) next
  # `ct` instead of `c` so the result does not shadow base::c()
  ct <- cor.test(d$f0sdDiff, d$turnOverall,
                 alternative = "two.sided", method = "pearson")
  corS[nrow(corS) + 1, ] <- c(s, "real", ct$estimate, ct$p.value)
  for (i in 1:10) {
    ct <- cor.test(d[, paste0("mockSdDiff", i)], d$turnOverall,
                   alternative = "two.sided", method = "pearson")
    corS[nrow(corS) + 1, ] <- c(s, "mock", ct$estimate, ct$p.value)
  }
}
# Negative r = convergence (gap shrinks), positive r = divergence.
corS <- corS |>
  mutate(sign = ifelse(p < 0.05, "*", NA),
         direction = case_when(
           r < 0 ~ "convergence",
           r > 0 ~ "divergence"))
# Summarize per speaker: real direction/coefficient, number of significant
# mock correlations, and whether the real correlation was significant.
count <- data.frame(matrix(nrow = 0, ncol = 6))
names(count) <- c("speaker", "type", "direction", "coefficient", "mock", "real")
for (s in unique(corS$speaker)) {
  real_row <- corS$speaker == s & corS$type == "real"
  mock_rows <- corS$speaker == s & corS$type == "mock"
  count[nrow(count) + 1, ] <- c(s,
                                "convergence",
                                corS$direction[real_row],
                                corS$r[real_row],
                                # was sum(!is.na(x) == TRUE); "== TRUE" was
                                # redundant (and a precedence trap)
                                sum(!is.na(corS$sign[mock_rows])),
                                # NA when not significant; resolved below
                                ifelse(corS$sign[real_row] == "*", "yes", "no"))
}
count <- merge(count, dat |> select(condition, speaker) |> filter(!duplicated(speaker)), by = "speaker")
count <- count |>
  mutate(mock = as.numeric(mock)) |>  # mutate_at() is superseded
  arrange(mock) |>
  mutate(feature = "f0sd",
         section = "Diapix",
         # only counts as real if at most 1 mock correlation was
         # significant and the real one was
         real = case_when(
           mock > 1 | is.na(real) ~ "no",
           .default = as.character(real)
         ))
all <- rbind(all, count)
Below we see the only 2 speakers whose correlation was significant between time and real f0 sd differences AND who had no more than 1 significant correlation between time and randomly ordered f0 sd differences.
One converged (in the impersonal condition) and one diverged (close condition) on f0 sd.
print(count)
## speaker type direction coefficient mock real condition
## 1 AML-A convergence divergence 0.0942197153003442 0 no impersonal
## 2 FXO-B convergence divergence 0.049219819131411 0 no impersonal
## 3 HAG-A convergence convergence -0.0760416222732127 0 no close
## 4 HAG-B convergence convergence -0.0184458828754394 0 no close
## 5 HBR-A convergence divergence 0.0936714058114991 0 no impersonal
## 6 HUJ-A convergence divergence 0.0751725996909468 0 no close
## 7 MJG-A convergence convergence -0.101819594058672 0 no close
## 8 NLO-A convergence divergence 0.000666481925621654 0 no close
## 9 NLO-B convergence divergence 0.124821781378392 0 no close
## 10 OAL-A convergence convergence -0.324655159586839 0 yes impersonal
## 11 OXQ-A convergence divergence 0.063287055218376 0 no impersonal
## 12 OXQ-B convergence divergence 0.0534343869841281 0 no impersonal
## 13 QRT-A convergence divergence 0.00950107671534853 0 no close
## 14 TTN-A convergence convergence -0.0449604005575976 0 no close
## 15 TTY-A convergence divergence 0.00536565759536223 0 no close
## 16 TTY-B convergence convergence -0.0676666445524121 0 no close
## 17 VDE-B convergence divergence 0.101687708260141 0 no impersonal
## 18 ZNV-B convergence convergence -0.0973549456622127 0 no close
## 19 AML-B convergence convergence -0.0214090614850532 1 no impersonal
## 20 FXO-A convergence convergence -0.166141472032452 1 no impersonal
## 21 HBR-B convergence convergence -0.0343083755393114 1 no impersonal
## 22 HUJ-B convergence divergence 0.0716311843561288 1 no close
## 23 KDA-A convergence convergence -0.17957608473935 1 no impersonal
## 24 KPB-A convergence divergence 0.138716143514441 1 no close
## 25 KPB-B convergence convergence -0.00697540446754079 1 no close
## 26 QRT-B convergence divergence 0.241788295665581 1 yes close
## 27 SGB-B convergence convergence -0.0846969224524156 1 no impersonal
## 28 SUK-A convergence divergence 0.0985858289170189 1 no impersonal
## 29 SUK-B convergence divergence 0.100079409496665 1 no impersonal
## 30 TTN-B convergence convergence -0.19308810492801 1 no close
## 31 ZNV-A convergence divergence 0.0474148512043777 1 no close
## 32 KDA-B convergence convergence -0.0354262936436638 2 no impersonal
## 33 MJG-B convergence divergence 0.0217175547285901 2 no close
## 34 SGB-A convergence convergence -0.0346656866245028 2 no impersonal
## 35 OAL-B convergence convergence -0.205558164852786 4 no impersonal
## 36 VDE-A convergence divergence 0.083010733276941 5 no impersonal
## feature section
## 1 f0sd Diapix
## 2 f0sd Diapix
## 3 f0sd Diapix
## 4 f0sd Diapix
## 5 f0sd Diapix
## 6 f0sd Diapix
## 7 f0sd Diapix
## 8 f0sd Diapix
## 9 f0sd Diapix
## 10 f0sd Diapix
## 11 f0sd Diapix
## 12 f0sd Diapix
## 13 f0sd Diapix
## 14 f0sd Diapix
## 15 f0sd Diapix
## 16 f0sd Diapix
## 17 f0sd Diapix
## 18 f0sd Diapix
## 19 f0sd Diapix
## 20 f0sd Diapix
## 21 f0sd Diapix
## 22 f0sd Diapix
## 23 f0sd Diapix
## 24 f0sd Diapix
## 25 f0sd Diapix
## 26 f0sd Diapix
## 27 f0sd Diapix
## 28 f0sd Diapix
## 29 f0sd Diapix
## 30 f0sd Diapix
## 31 f0sd Diapix
## 32 f0sd Diapix
## 33 f0sd Diapix
## 34 f0sd Diapix
## 35 f0sd Diapix
## 36 f0sd Diapix
# Per-speaker scatter of own f0 median vs the interlocutor's previous f0
# median, with a per-panel linear fit.
ggplot(data = dat, mapping = aes(x = f0medzGender, y = prevf0med)) +
  geom_point() +
  geom_smooth(method = "lm") +
  facet_wrap(~ speaker)
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 430 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 430 rows containing missing values (`geom_point()`).
corS <- data.frame(matrix(nrow = 0, ncol = 4))
names(corS) <- c("speaker", "type", "r", "p")
# Per-speaker synchrony: correlate the speaker's f0 median with the
# interlocutor's previous f0 median ("real"), plus 10 correlations against
# randomly re-ordered interlocutor values ("mock") as a permutation baseline.
for (s in unique(dat$speaker)) {
  d <- dat |> filter(speaker == s)
  ct <- cor.test(d$f0medzGender, d$prevf0med,
                 alternative = "two.sided", method = "pearson")
  corS[nrow(corS) + 1, ] <- c(s, "real", ct$estimate, ct$p.value)
  for (i in 1:10) {
    # [[ ]] extracts a vector for both data.frames and tibbles
    ct <- cor.test(d[[paste0("mockMed", i)]], d$f0medzGender,
                   alternative = "two.sided", method = "pearson")
    corS[nrow(corS) + 1, ] <- c(s, "mock", ct$estimate, ct$p.value)
  }
}
# The row-wise assignment above coerces every column to character, so restore
# r and p to numeric BEFORE comparing. Comparing character against numeric
# falls back to lexicographic string comparison (e.g. "1e-05" < "0.05" is
# FALSE), which silently mis-flags highly significant p-values.
corS <- corS |>
  mutate(across(c(r, p), as.numeric),
         sign = ifelse(p < 0.05, "*", NA),
         direction = case_when(
           r < 0 ~ "synchrony",
           r > 0 ~ "dyssynchrony"))
count <- data.frame(matrix(nrow = 0, ncol = 6))
names(count) <- c("speaker", "type", "direction", "coefficient", "mock", "real")
# One summary row per speaker: direction and coefficient of the real
# correlation, how many of the 10 mock correlations reached significance, and
# whether the real correlation did ("real" is NA when sign is NA; handled below).
for (s in unique(corS$speaker)) {
  count[nrow(count) + 1, ] <- c(
    s,
    "synchrony",
    corS$direction[corS$speaker == s & corS$type == "real"],
    corS$r[corS$speaker == s & corS$type == "real"],
    sum(!is.na(corS$sign[corS$speaker == s & corS$type == "mock"])),
    ifelse(corS$sign[corS$speaker == s & corS$type == "real"] == "*", "yes", "no"))
}
count <- merge(count,
               dat |> select(condition, speaker) |> filter(!duplicated(speaker)),
               by = "speaker")
# A speaker only counts as truly (dys)synchronizing when the real correlation
# is significant AND at most 1 of the 10 mock correlations is.
count <- count |>
  mutate(mock = as.numeric(mock)) |>
  arrange(mock) |>
  mutate(feature = "f0median",
         section = "entireExp",
         real = case_when(
           mock > 1 | is.na(real) ~ "no",
           .default = as.character(real)
         ))
all <- rbind(all, count)
Below we see the 2 speakers whose correlation was significant between their real f0 median and their interlocutor's previous f0 median AND who had no more than 1 significant correlation with the randomly ordered interlocutor f0 median.
Both desynchronized: one in the close and one in the impersonal condition.
print(count)
## speaker type direction coefficient mock real condition
## 1 FWR-A synchrony synchrony -0.300563638303997 0 no close
## 2 FWR-B synchrony synchrony -0.0903338172619992 0 no close
## 3 HAG-A synchrony dyssynchrony 0.09824684292749 0 no close
## 4 HAG-B synchrony synchrony -0.0638284953170705 0 no close
## 5 HBR-A synchrony dyssynchrony 0.0299827656475949 0 no impersonal
## 6 HBR-B synchrony dyssynchrony 0.116881271324112 0 no impersonal
## 7 HUJ-A synchrony dyssynchrony 0.201242839663773 0 yes close
## 8 HUJ-B synchrony dyssynchrony 0.0627937017123163 0 no close
## 9 KDA-A synchrony dyssynchrony 0.121636944685442 0 no impersonal
## 10 KDA-B synchrony dyssynchrony 0.0538269394660566 0 no impersonal
## 11 KPB-B synchrony dyssynchrony 0.127578136161196 0 no close
## 12 MJG-A synchrony dyssynchrony 0.0899603891734553 0 no close
## 13 NLO-A synchrony dyssynchrony 0.147066488514547 0 no close
## 14 OAL-A synchrony dyssynchrony 0.0959107135753522 0 no impersonal
## 15 OAL-B synchrony dyssynchrony 0.143153142681874 0 no impersonal
## 16 OXQ-A synchrony dyssynchrony 0.00271900872204129 0 no impersonal
## 17 OXQ-B synchrony dyssynchrony 0.0498139093450386 0 no impersonal
## 18 QRT-A synchrony synchrony -0.11427055616047 0 no close
## 19 QRT-B synchrony synchrony -0.0553655168261072 0 no close
## 20 SGB-B synchrony synchrony -0.0729455287064786 0 no impersonal
## 21 SUK-A synchrony dyssynchrony 0.159852903688834 0 yes impersonal
## 22 SUK-B synchrony dyssynchrony 0.150288323630953 0 no impersonal
## 23 TTN-A synchrony dyssynchrony 0.0346756305164721 0 no close
## 24 TTN-B synchrony synchrony -0.0801561638823987 0 no close
## 25 TTY-A synchrony synchrony -0.11718419455687 0 no close
## 26 TTY-B synchrony synchrony -0.0694356817891461 0 no close
## 27 VDE-A synchrony synchrony -0.00698703654527092 0 no impersonal
## 28 ZNV-A synchrony dyssynchrony 0.161701332123916 0 no close
## 29 ZNV-B synchrony synchrony -0.0457097777453476 0 no close
## 30 AML-A synchrony dyssynchrony 0.0903646112279503 1 no impersonal
## 31 AML-B synchrony synchrony -0.146257952336042 1 no impersonal
## 32 KPB-A synchrony dyssynchrony 0.0313304212491666 1 no close
## 33 MJG-B synchrony dyssynchrony 0.0207224162094005 1 no close
## 34 NLO-B synchrony dyssynchrony 0.0461495226329855 1 no close
## 35 SGB-A synchrony synchrony -0.00858759410310959 1 no impersonal
## 36 VDE-B synchrony dyssynchrony 0.104140507293413 2 no impersonal
## 37 FXO-A synchrony dyssynchrony 0.214568139759 4 no impersonal
## 38 FXO-B synchrony dyssynchrony 0.258429839383353 5 no impersonal
## feature section
## 1 f0median entireExp
## 2 f0median entireExp
## 3 f0median entireExp
## 4 f0median entireExp
## 5 f0median entireExp
## 6 f0median entireExp
## 7 f0median entireExp
## 8 f0median entireExp
## 9 f0median entireExp
## 10 f0median entireExp
## 11 f0median entireExp
## 12 f0median entireExp
## 13 f0median entireExp
## 14 f0median entireExp
## 15 f0median entireExp
## 16 f0median entireExp
## 17 f0median entireExp
## 18 f0median entireExp
## 19 f0median entireExp
## 20 f0median entireExp
## 21 f0median entireExp
## 22 f0median entireExp
## 23 f0median entireExp
## 24 f0median entireExp
## 25 f0median entireExp
## 26 f0median entireExp
## 27 f0median entireExp
## 28 f0median entireExp
## 29 f0median entireExp
## 30 f0median entireExp
## 31 f0median entireExp
## 32 f0median entireExp
## 33 f0median entireExp
## 34 f0median entireExp
## 35 f0median entireExp
## 36 f0median entireExp
## 37 f0median entireExp
## 38 f0median entireExp
# Per-speaker scatter of own f0 median vs the interlocutor's previous f0
# median, Lists task only, with a per-panel linear fit.
ggplot(data = filter(dat, task == "Lists"),
       mapping = aes(x = f0medzGender, y = prevf0med)) +
  geom_point() +
  geom_smooth(method = "lm") +
  facet_wrap(~ speaker)
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 133 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 133 rows containing missing values (`geom_point()`).
corS <- data.frame(matrix(nrow = 0, ncol = 4))
names(corS) <- c("speaker", "type", "r", "p")
# Per-speaker synchrony in the Lists task: speaker's f0 median vs the
# interlocutor's previous f0 median ("real"), plus 10 randomly re-ordered
# baselines ("mock").
for (s in unique(dat$speaker)) {
  d <- dat |> filter(speaker == s, task == "Lists")
  ct <- cor.test(d$f0medzGender, d$prevf0med,
                 alternative = "two.sided", method = "pearson")
  corS[nrow(corS) + 1, ] <- c(s, "real", ct$estimate, ct$p.value)
  for (i in 1:10) {
    # [[ ]] extracts a vector for both data.frames and tibbles
    ct <- cor.test(d[[paste0("mockMed", i)]], d$f0medzGender,
                   alternative = "two.sided", method = "pearson")
    corS[nrow(corS) + 1, ] <- c(s, "mock", ct$estimate, ct$p.value)
  }
}
# The row-wise assignment above coerces every column to character, so restore
# r and p to numeric BEFORE comparing. Comparing character against numeric is
# lexicographic (e.g. "1e-05" < "0.05" is FALSE) and mis-flags small p-values.
corS <- corS |>
  mutate(across(c(r, p), as.numeric),
         sign = ifelse(p < 0.05, "*", NA),
         direction = case_when(
           r < 0 ~ "synchrony",
           r > 0 ~ "dyssynchrony"))
count <- data.frame(matrix(nrow = 0, ncol = 6))
names(count) <- c("speaker", "type", "direction", "coefficient", "mock", "real")
# One summary row per speaker: real-correlation direction/coefficient, number
# of significant mock correlations, and whether the real one was significant.
for (s in unique(corS$speaker)) {
  count[nrow(count) + 1, ] <- c(
    s,
    "synchrony",
    corS$direction[corS$speaker == s & corS$type == "real"],
    corS$r[corS$speaker == s & corS$type == "real"],
    sum(!is.na(corS$sign[corS$speaker == s & corS$type == "mock"])),
    ifelse(corS$sign[corS$speaker == s & corS$type == "real"] == "*", "yes", "no"))
}
count <- merge(count,
               dat |> select(condition, speaker) |> filter(!duplicated(speaker)),
               by = "speaker")
# A speaker only counts as truly (dys)synchronizing when the real correlation
# is significant AND at most 1 of the 10 mock correlations is.
count <- count |>
  mutate(mock = as.numeric(mock)) |>
  arrange(mock) |>
  mutate(feature = "f0median",
         section = "Lists",
         real = case_when(
           mock > 1 | is.na(real) ~ "no",
           .default = as.character(real)
         ))
all <- rbind(all, count)
Below we see the 2 speakers whose correlation was significant between their real f0 median and their interlocutor's previous f0 median AND who had no more than 1 significant correlation with the randomly ordered interlocutor f0 median.
One in the close condition synchronized (QRT-A) and one in the close condition desynchronized (HUJ-A).
print(count)
## speaker type direction coefficient mock real condition
## 1 FWR-A synchrony synchrony -0.300563638303997 0 no close
## 2 FWR-B synchrony synchrony -0.0903338172619992 0 no close
## 3 FXO-A synchrony synchrony -0.063259892754004 0 no impersonal
## 4 FXO-B synchrony dyssynchrony 0.0779481438661408 0 no impersonal
## 5 HAG-B synchrony synchrony -0.04935367620688 0 no close
## 6 HUJ-A synchrony dyssynchrony 0.357298884252694 0 yes close
## 7 KDA-A synchrony dyssynchrony 0.29803616352212 0 no impersonal
## 8 KPB-A synchrony synchrony -0.242487684841087 0 no close
## 9 KPB-B synchrony dyssynchrony 0.0403473420771306 0 no close
## 10 MJG-A synchrony dyssynchrony 0.326241765898523 0 no close
## 11 MJG-B synchrony dyssynchrony 0.175598035954763 0 no close
## 12 OAL-A synchrony dyssynchrony 0.0144211222005726 0 no impersonal
## 13 OAL-B synchrony dyssynchrony 0.105738922640311 0 no impersonal
## 14 OXQ-A synchrony synchrony -0.0606462290828247 0 no impersonal
## 15 OXQ-B synchrony dyssynchrony 0.227580037512609 0 no impersonal
## 16 SUK-B synchrony synchrony -0.0105685543978161 0 no impersonal
## 17 TTN-A synchrony dyssynchrony 0.0560701443425562 0 no close
## 18 TTY-A synchrony synchrony -0.0800403479714815 0 no close
## 19 TTY-B synchrony synchrony -0.240791450468928 0 no close
## 20 VDE-B synchrony dyssynchrony 0.16117926018401 0 no impersonal
## 21 ZNV-A synchrony synchrony -0.246215269837491 0 no close
## 22 AML-A synchrony dyssynchrony 0.205251261385981 1 no impersonal
## 23 AML-B synchrony synchrony -0.180673728890839 1 no impersonal
## 24 HBR-A synchrony dyssynchrony 0.17579959866002 1 no impersonal
## 25 HBR-B synchrony dyssynchrony 0.167399575324429 1 no impersonal
## 26 HUJ-B synchrony synchrony -0.0311284685362079 1 no close
## 27 KDA-B synchrony synchrony -0.0300682517309385 1 no impersonal
## 28 NLO-B synchrony synchrony -0.0488085453207347 1 no close
## 29 QRT-A synchrony synchrony -0.390678503584988 1 yes close
## 30 QRT-B synchrony synchrony -0.154361931658257 1 no close
## 31 SGB-B synchrony synchrony -0.132313848153157 1 no impersonal
## 32 TTN-B synchrony synchrony -0.245953026473935 1 no close
## 33 VDE-A synchrony dyssynchrony 0.163771790713542 1 no impersonal
## 34 ZNV-B synchrony dyssynchrony 0.078874667911933 1 no close
## 35 HAG-A synchrony dyssynchrony 0.0842630443752847 2 no close
## 36 NLO-A synchrony dyssynchrony 0.427258755786233 2 no close
## 37 SGB-A synchrony synchrony -0.329082926590209 2 no impersonal
## 38 SUK-A synchrony dyssynchrony 0.0369339511639239 2 no impersonal
## feature section
## 1 f0median Lists
## 2 f0median Lists
## 3 f0median Lists
## 4 f0median Lists
## 5 f0median Lists
## 6 f0median Lists
## 7 f0median Lists
## 8 f0median Lists
## 9 f0median Lists
## 10 f0median Lists
## 11 f0median Lists
## 12 f0median Lists
## 13 f0median Lists
## 14 f0median Lists
## 15 f0median Lists
## 16 f0median Lists
## 17 f0median Lists
## 18 f0median Lists
## 19 f0median Lists
## 20 f0median Lists
## 21 f0median Lists
## 22 f0median Lists
## 23 f0median Lists
## 24 f0median Lists
## 25 f0median Lists
## 26 f0median Lists
## 27 f0median Lists
## 28 f0median Lists
## 29 f0median Lists
## 30 f0median Lists
## 31 f0median Lists
## 32 f0median Lists
## 33 f0median Lists
## 34 f0median Lists
## 35 f0median Lists
## 36 f0median Lists
## 37 f0median Lists
## 38 f0median Lists
# Per-speaker scatter of own f0 median vs the interlocutor's previous f0
# median, Diapix task only, with a per-panel linear fit.
ggplot(data = filter(dat, task == "Diapix"),
       mapping = aes(x = f0medzGender, y = prevf0med)) +
  geom_point() +
  geom_smooth(method = "lm") +
  facet_wrap(~ speaker)
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 297 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 297 rows containing missing values (`geom_point()`).
corS <- data.frame(matrix(nrow = 0, ncol = 4))
names(corS) <- c("speaker", "type", "r", "p")
# Per-speaker synchrony in the Diapix task: speaker's f0 median vs the
# interlocutor's previous f0 median ("real"), plus 10 randomly re-ordered
# baselines ("mock").
for (s in unique(dat$speaker)) {
  d <- dat |> filter(speaker == s, task == "Diapix")
  if (nrow(d) == 0) {
    next  # the Diapix recordings are missing for one dyad
  }
  ct <- cor.test(d$f0medzGender, d$prevf0med,
                 alternative = "two.sided", method = "pearson")
  corS[nrow(corS) + 1, ] <- c(s, "real", ct$estimate, ct$p.value)
  for (i in 1:10) {
    # [[ ]] extracts a vector for both data.frames and tibbles
    ct <- cor.test(d[[paste0("mockMed", i)]], d$f0medzGender,
                   alternative = "two.sided", method = "pearson")
    corS[nrow(corS) + 1, ] <- c(s, "mock", ct$estimate, ct$p.value)
  }
}
# The row-wise assignment above coerces every column to character, so restore
# r and p to numeric BEFORE comparing. Comparing character against numeric is
# lexicographic (e.g. "1e-05" < "0.05" is FALSE) and mis-flags small p-values.
corS <- corS |>
  mutate(across(c(r, p), as.numeric),
         sign = ifelse(p < 0.05, "*", NA),
         direction = case_when(
           r < 0 ~ "synchrony",
           r > 0 ~ "dyssynchrony"))
count <- data.frame(matrix(nrow = 0, ncol = 6))
names(count) <- c("speaker", "type", "direction", "coefficient", "mock", "real")
# One summary row per speaker: real-correlation direction/coefficient, number
# of significant mock correlations, and whether the real one was significant.
for (s in unique(corS$speaker)) {
  count[nrow(count) + 1, ] <- c(
    s,
    "synchrony",
    corS$direction[corS$speaker == s & corS$type == "real"],
    corS$r[corS$speaker == s & corS$type == "real"],
    sum(!is.na(corS$sign[corS$speaker == s & corS$type == "mock"])),
    ifelse(corS$sign[corS$speaker == s & corS$type == "real"] == "*", "yes", "no"))
}
count <- merge(count,
               dat |> select(condition, speaker) |> filter(!duplicated(speaker)),
               by = "speaker")
# A speaker only counts as truly (dys)synchronizing when the real correlation
# is significant AND at most 1 of the 10 mock correlations is.
count <- count |>
  mutate(mock = as.numeric(mock)) |>
  arrange(mock) |>
  mutate(feature = "f0median",
         section = "Diapix",
         real = case_when(
           mock > 1 | is.na(real) ~ "no",
           .default = as.character(real)
         ))
all <- rbind(all, count)
Below we see the 3 speakers whose correlation was significant between their real f0 median and their interlocutor's previous f0 median AND who had no more than 1 significant correlation with the randomly ordered interlocutor f0 median.
All three desynchronized. One in the close condition and two in the impersonal condition.
print(count)
## speaker type direction coefficient mock real condition
## 1 AML-B synchrony synchrony -0.111397648658917 0 no impersonal
## 2 FXO-B synchrony dyssynchrony 0.155415765102268 0 no impersonal
## 3 HAG-A synchrony dyssynchrony 0.136821613219354 0 no close
## 4 HAG-B synchrony synchrony -0.0857910037843134 0 no close
## 5 HBR-A synchrony synchrony -0.053747580897934 0 no impersonal
## 6 HUJ-A synchrony dyssynchrony 0.148127454501519 0 no close
## 7 HUJ-B synchrony dyssynchrony 0.127850385010129 0 no close
## 8 KDA-A synchrony dyssynchrony 0.00845859250152412 0 no impersonal
## 9 KDA-B synchrony synchrony -0.0657278051300379 0 no impersonal
## 10 KPB-B synchrony dyssynchrony 0.139222654174925 0 no close
## 11 MJG-A synchrony dyssynchrony 0.0805659334777255 0 no close
## 12 NLO-A synchrony dyssynchrony 0.125411269271292 0 no close
## 13 NLO-B synchrony dyssynchrony 0.0512109354640375 0 no close
## 14 OAL-B synchrony dyssynchrony 0.109543374380144 0 no impersonal
## 15 OXQ-A synchrony dyssynchrony 0.0897795595989093 0 no impersonal
## 16 OXQ-B synchrony synchrony -0.00720796902206883 0 no impersonal
## 17 QRT-B synchrony synchrony -0.00563172603242151 0 no close
## 18 SUK-A synchrony dyssynchrony 0.183856357146092 0 yes impersonal
## 19 SUK-B synchrony dyssynchrony 0.21883773693464 0 yes impersonal
## 20 TTN-A synchrony dyssynchrony 0.0200452345903258 0 no close
## 21 TTY-A synchrony synchrony -0.0671341631951863 0 no close
## 22 TTY-B synchrony dyssynchrony 0.0247270558802934 0 no close
## 23 VDE-A synchrony synchrony -0.0386510029682614 0 no impersonal
## 24 ZNV-A synchrony dyssynchrony 0.342883979389005 0 yes close
## 25 AML-A synchrony synchrony -0.0503773929497733 1 no impersonal
## 26 FXO-A synchrony dyssynchrony 0.179843226600184 1 no impersonal
## 27 HBR-B synchrony dyssynchrony 0.132352368457503 1 no impersonal
## 28 KPB-A synchrony dyssynchrony 0.088974228076865 1 no close
## 29 MJG-B synchrony synchrony -0.00998674300858751 1 no close
## 30 QRT-A synchrony synchrony -0.00214849953654202 1 no close
## 31 SGB-A synchrony dyssynchrony 0.0626848133635434 1 no impersonal
## 32 SGB-B synchrony synchrony -0.0119655000463638 1 no impersonal
## 33 TTN-B synchrony synchrony -0.0171964234067967 1 no close
## 34 ZNV-B synchrony synchrony -0.115007287539743 1 no close
## 35 VDE-B synchrony dyssynchrony 0.103111017933215 2 no impersonal
## 36 OAL-A synchrony dyssynchrony 0.116673568921843 4 no impersonal
## feature section
## 1 f0median Diapix
## 2 f0median Diapix
## 3 f0median Diapix
## 4 f0median Diapix
## 5 f0median Diapix
## 6 f0median Diapix
## 7 f0median Diapix
## 8 f0median Diapix
## 9 f0median Diapix
## 10 f0median Diapix
## 11 f0median Diapix
## 12 f0median Diapix
## 13 f0median Diapix
## 14 f0median Diapix
## 15 f0median Diapix
## 16 f0median Diapix
## 17 f0median Diapix
## 18 f0median Diapix
## 19 f0median Diapix
## 20 f0median Diapix
## 21 f0median Diapix
## 22 f0median Diapix
## 23 f0median Diapix
## 24 f0median Diapix
## 25 f0median Diapix
## 26 f0median Diapix
## 27 f0median Diapix
## 28 f0median Diapix
## 29 f0median Diapix
## 30 f0median Diapix
## 31 f0median Diapix
## 32 f0median Diapix
## 33 f0median Diapix
## 34 f0median Diapix
## 35 f0median Diapix
## 36 f0median Diapix
# Per-speaker scatter of own f0 max vs the interlocutor's previous f0 max,
# with a per-panel linear fit.
ggplot(data = dat, mapping = aes(x = f0maxzGender, y = prevf0max)) +
  geom_point() +
  geom_smooth(method = "lm") +
  facet_wrap(~ speaker)
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 288 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 288 rows containing missing values (`geom_point()`).
corS <- data.frame(matrix(nrow = 0, ncol = 4))
names(corS) <- c("speaker", "type", "r", "p")
# Per-speaker synchrony: speaker's f0 max vs the interlocutor's previous
# f0 max ("real"), plus 10 randomly re-ordered baselines ("mock").
for (s in unique(dat$speaker)) {
  d <- dat |> filter(speaker == s)
  ct <- cor.test(d$f0maxzGender, d$prevf0max,
                 alternative = "two.sided", method = "pearson")
  corS[nrow(corS) + 1, ] <- c(s, "real", ct$estimate, ct$p.value)
  for (i in 1:10) {
    # [[ ]] extracts a vector for both data.frames and tibbles
    ct <- cor.test(d[[paste0("mockMax", i)]], d$f0maxzGender,
                   alternative = "two.sided", method = "pearson")
    corS[nrow(corS) + 1, ] <- c(s, "mock", ct$estimate, ct$p.value)
  }
}
# The row-wise assignment above coerces every column to character, so restore
# r and p to numeric BEFORE comparing. Comparing character against numeric is
# lexicographic (e.g. "1e-05" < "0.05" is FALSE) and mis-flags small p-values.
corS <- corS |>
  mutate(across(c(r, p), as.numeric),
         sign = ifelse(p < 0.05, "*", NA),
         direction = case_when(
           r < 0 ~ "synchrony",
           r > 0 ~ "dyssynchrony"))
count <- data.frame(matrix(nrow = 0, ncol = 6))
names(count) <- c("speaker", "type", "direction", "coefficient", "mock", "real")
# One summary row per speaker: real-correlation direction/coefficient, number
# of significant mock correlations, and whether the real one was significant.
for (s in unique(corS$speaker)) {
  count[nrow(count) + 1, ] <- c(
    s,
    "synchrony",
    corS$direction[corS$speaker == s & corS$type == "real"],
    corS$r[corS$speaker == s & corS$type == "real"],
    sum(!is.na(corS$sign[corS$speaker == s & corS$type == "mock"])),
    ifelse(corS$sign[corS$speaker == s & corS$type == "real"] == "*", "yes", "no"))
}
count <- merge(count,
               dat |> select(condition, speaker) |> filter(!duplicated(speaker)),
               by = "speaker")
# A speaker only counts as truly (dys)synchronizing when the real correlation
# is significant AND at most 1 of the 10 mock correlations is.
count <- count |>
  mutate(mock = as.numeric(mock)) |>
  arrange(mock) |>
  mutate(feature = "f0max",
         section = "entireExp",
         real = case_when(
           mock > 1 | is.na(real) ~ "no",
           .default = as.character(real)
         ))
all <- rbind(all, count)
Below we see the 7 speakers whose correlation was significant between their real f0 max and their interlocutor's previous f0 max AND who had no more than 1 significant correlation with the randomly ordered interlocutor f0 max.
All desynchronized: three in the close condition and four in the impersonal condition.
print(count)
## speaker type direction coefficient mock real condition
## 1 AML-A synchrony dyssynchrony 0.0416290849805265 0 no impersonal
## 2 HBR-A synchrony dyssynchrony 0.0876169357960696 0 no impersonal
## 3 HUJ-A synchrony dyssynchrony 0.034557190796969 0 no close
## 4 KPB-A synchrony dyssynchrony 0.168192467942173 0 no close
## 5 KPB-B synchrony synchrony -0.011328666437045 0 no close
## 6 NLO-A synchrony dyssynchrony 0.255974952041841 0 yes close
## 7 OAL-A synchrony dyssynchrony 0.221544511147051 0 yes impersonal
## 8 OXQ-A synchrony synchrony -0.0131916423013826 0 no impersonal
## 9 OXQ-B synchrony dyssynchrony 0.175236749050539 0 yes impersonal
## 10 QRT-A synchrony dyssynchrony 0.120857015665861 0 no close
## 11 QRT-B synchrony synchrony -0.024401104268546 0 no close
## 12 SGB-A synchrony dyssynchrony 0.0840549317421198 0 no impersonal
## 13 SGB-B synchrony synchrony -0.0186687176452995 0 no impersonal
## 14 SUK-B synchrony dyssynchrony 0.130713222242466 0 no impersonal
## 15 TTY-A synchrony dyssynchrony 0.202849083353503 0 yes close
## 16 VDE-A synchrony dyssynchrony 0.126090128945892 0 no impersonal
## 17 ZNV-B synchrony synchrony -0.0212148801392571 0 no close
## 18 AML-B synchrony dyssynchrony 0.25550299231443 1 yes impersonal
## 19 FWR-A synchrony dyssynchrony 0.00488048850626231 1 no close
## 20 FWR-B synchrony synchrony -0.458183982035592 1 no close
## 21 HAG-B synchrony dyssynchrony 0.0119834422507738 1 no close
## 22 HBR-B synchrony dyssynchrony 0.0243226567285494 1 no impersonal
## 23 KDA-A synchrony dyssynchrony 0.0238123332891506 1 no impersonal
## 24 MJG-A synchrony dyssynchrony 0.03242382587976 1 no close
## 25 OAL-B synchrony dyssynchrony 0.0802037875378496 1 no impersonal
## 26 TTN-A synchrony dyssynchrony 0.182537194609077 1 yes close
## 27 TTN-B synchrony dyssynchrony 0.113839227852142 1 no close
## 28 VDE-B synchrony dyssynchrony 0.182133613328821 1 yes impersonal
## 29 ZNV-A synchrony dyssynchrony 0.0415904824426567 1 no close
## 30 FXO-A synchrony dyssynchrony 0.126035724283629 2 no impersonal
## 31 FXO-B synchrony dyssynchrony 0.212665765727217 2 no impersonal
## 32 HUJ-B synchrony dyssynchrony 0.0158766721929422 2 no close
## 33 KDA-B synchrony dyssynchrony 0.0801031840038905 2 no impersonal
## 34 MJG-B synchrony synchrony -0.0536695411162715 2 no close
## 35 NLO-B synchrony dyssynchrony 0.145453514431542 2 no close
## 36 SUK-A synchrony dyssynchrony 0.101159077096394 2 no impersonal
## 37 TTY-B synchrony dyssynchrony 0.108886755543673 2 no close
## 38 HAG-A synchrony synchrony -0.0652759110843368 3 no close
## feature section
## 1 f0max entireExp
## 2 f0max entireExp
## 3 f0max entireExp
## 4 f0max entireExp
## 5 f0max entireExp
## 6 f0max entireExp
## 7 f0max entireExp
## 8 f0max entireExp
## 9 f0max entireExp
## 10 f0max entireExp
## 11 f0max entireExp
## 12 f0max entireExp
## 13 f0max entireExp
## 14 f0max entireExp
## 15 f0max entireExp
## 16 f0max entireExp
## 17 f0max entireExp
## 18 f0max entireExp
## 19 f0max entireExp
## 20 f0max entireExp
## 21 f0max entireExp
## 22 f0max entireExp
## 23 f0max entireExp
## 24 f0max entireExp
## 25 f0max entireExp
## 26 f0max entireExp
## 27 f0max entireExp
## 28 f0max entireExp
## 29 f0max entireExp
## 30 f0max entireExp
## 31 f0max entireExp
## 32 f0max entireExp
## 33 f0max entireExp
## 34 f0max entireExp
## 35 f0max entireExp
## 36 f0max entireExp
## 37 f0max entireExp
## 38 f0max entireExp
# Per-speaker scatter of own f0 max vs the interlocutor's previous f0 max,
# Lists task only, with a per-panel linear fit.
ggplot(data = filter(dat, task == "Lists"),
       mapping = aes(x = f0maxzGender, y = prevf0max)) +
  geom_point() +
  geom_smooth(method = "lm") +
  facet_wrap(~ speaker)
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 97 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 97 rows containing missing values (`geom_point()`).
corS <- data.frame(matrix(nrow = 0, ncol = 4))
names(corS) <- c("speaker", "type", "r", "p")
# Per-speaker synchrony in the Lists task: speaker's f0 max vs the
# interlocutor's previous f0 max ("real"), plus 10 randomly re-ordered
# baselines ("mock").
for (s in unique(dat$speaker)) {
  d <- dat |> filter(speaker == s, task == "Lists")
  ct <- cor.test(d$f0maxzGender, d$prevf0max,
                 alternative = "two.sided", method = "pearson")
  corS[nrow(corS) + 1, ] <- c(s, "real", ct$estimate, ct$p.value)
  for (i in 1:10) {
    # [[ ]] extracts a vector for both data.frames and tibbles
    ct <- cor.test(d[[paste0("mockMax", i)]], d$f0maxzGender,
                   alternative = "two.sided", method = "pearson")
    corS[nrow(corS) + 1, ] <- c(s, "mock", ct$estimate, ct$p.value)
  }
}
# The row-wise assignment above coerces every column to character, so restore
# r and p to numeric BEFORE comparing. Comparing character against numeric is
# lexicographic (e.g. "1e-05" < "0.05" is FALSE) and mis-flags small p-values.
corS <- corS |>
  mutate(across(c(r, p), as.numeric),
         sign = ifelse(p < 0.05, "*", NA),
         direction = case_when(
           r < 0 ~ "synchrony",
           r > 0 ~ "dyssynchrony"))
count <- data.frame(matrix(nrow = 0, ncol = 6))
names(count) <- c("speaker", "type", "direction", "coefficient", "mock", "real")
# One summary row per speaker: real-correlation direction/coefficient, number
# of significant mock correlations, and whether the real one was significant.
for (s in unique(corS$speaker)) {
  count[nrow(count) + 1, ] <- c(
    s,
    "synchrony",
    corS$direction[corS$speaker == s & corS$type == "real"],
    corS$r[corS$speaker == s & corS$type == "real"],
    sum(!is.na(corS$sign[corS$speaker == s & corS$type == "mock"])),
    ifelse(corS$sign[corS$speaker == s & corS$type == "real"] == "*", "yes", "no"))
}
count <- merge(count,
               dat |> select(condition, speaker) |> filter(!duplicated(speaker)),
               by = "speaker")
# A speaker only counts as truly (dys)synchronizing when the real correlation
# is significant AND at most 1 of the 10 mock correlations is.
count <- count |>
  mutate(mock = as.numeric(mock)) |>
  arrange(mock) |>
  mutate(feature = "f0max",
         section = "Lists",
         real = case_when(
           mock > 1 | is.na(real) ~ "no",
           .default = as.character(real)
         ))
all <- rbind(all, count)
Below we see the only speaker whose correlation was significant between her real f0 max and her interlocutor's previous f0 max AND who had no more than 1 significant correlation with the randomly ordered interlocutor f0 max.
She was in the impersonal condition and desynchronized.
print(count)
## speaker type direction coefficient mock real condition
## 1 AML-A synchrony dyssynchrony 0.141164436287923 0 no impersonal
## 2 AML-B synchrony dyssynchrony 0.286064558589535 0 no impersonal
## 3 FXO-B synchrony dyssynchrony 0.142735223250187 0 no impersonal
## 4 HBR-A synchrony dyssynchrony 0.0395786727064874 0 no impersonal
## 5 HBR-B synchrony dyssynchrony 0.0809436405355996 0 no impersonal
## 6 KPB-A synchrony dyssynchrony 0.221428089361738 0 no close
## 7 KPB-B synchrony synchrony -0.125722531809469 0 no close
## 8 MJG-A synchrony synchrony -0.0892656125884824 0 no close
## 9 NLO-A synchrony synchrony -0.12147251220541 0 no close
## 10 OAL-A synchrony dyssynchrony 0.263851732546551 0 no impersonal
## 11 OAL-B synchrony dyssynchrony 0.044274566049483 0 no impersonal
## 12 OXQ-B synchrony synchrony -0.0921078693092518 0 no impersonal
## 13 QRT-A synchrony dyssynchrony 0.257713557270937 0 no close
## 14 SUK-B synchrony dyssynchrony 0.200574888337046 0 no impersonal
## 15 TTN-B synchrony dyssynchrony 0.0557646248209323 0 no close
## 16 VDE-A synchrony dyssynchrony 0.33594454744154 0 yes impersonal
## 17 VDE-B synchrony dyssynchrony 0.231721334652014 0 no impersonal
## 18 ZNV-A synchrony dyssynchrony 0.0238191058055579 0 no close
## 19 ZNV-B synchrony synchrony -0.299219279409205 0 no close
## 20 FWR-A synchrony dyssynchrony 0.00488048850626231 1 no close
## 21 FWR-B synchrony synchrony -0.458183982035592 1 no close
## 22 FXO-A synchrony synchrony -0.0438226314347165 1 no impersonal
## 23 HAG-A synchrony synchrony -0.164418703093826 1 no close
## 24 HAG-B synchrony synchrony -0.250571843644055 1 no close
## 25 HUJ-A synchrony dyssynchrony 0.0924757706836303 1 no close
## 26 OXQ-A synchrony synchrony -0.0983050587633334 1 no impersonal
## 27 QRT-B synchrony dyssynchrony 0.217659700033205 1 no close
## 28 SGB-A synchrony synchrony -0.0795490567607825 1 no impersonal
## 29 SGB-B synchrony dyssynchrony 0.142681325407843 1 no impersonal
## 30 SUK-A synchrony synchrony -0.0450230251337383 1 no impersonal
## 31 TTN-A synchrony dyssynchrony 0.0808854876316062 1 no close
## 32 MJG-B synchrony synchrony -0.00603329694532394 2 no close
## 33 NLO-B synchrony dyssynchrony 0.00975205123211711 2 no close
## 34 TTY-A synchrony dyssynchrony 0.35970500423317 2 no close
## 35 TTY-B synchrony synchrony -0.159978937047311 2 no close
## 36 HUJ-B synchrony synchrony -0.200099113333065 3 no close
## 37 KDA-B synchrony dyssynchrony 0.547561911098476 6 no impersonal
## 38 KDA-A synchrony dyssynchrony 0.506831733385603 7 no impersonal
## feature section
## 1 f0max Lists
## 2 f0max Lists
## 3 f0max Lists
## 4 f0max Lists
## 5 f0max Lists
## 6 f0max Lists
## 7 f0max Lists
## 8 f0max Lists
## 9 f0max Lists
## 10 f0max Lists
## 11 f0max Lists
## 12 f0max Lists
## 13 f0max Lists
## 14 f0max Lists
## 15 f0max Lists
## 16 f0max Lists
## 17 f0max Lists
## 18 f0max Lists
## 19 f0max Lists
## 20 f0max Lists
## 21 f0max Lists
## 22 f0max Lists
## 23 f0max Lists
## 24 f0max Lists
## 25 f0max Lists
## 26 f0max Lists
## 27 f0max Lists
## 28 f0max Lists
## 29 f0max Lists
## 30 f0max Lists
## 31 f0max Lists
## 32 f0max Lists
## 33 f0max Lists
## 34 f0max Lists
## 35 f0max Lists
## 36 f0max Lists
## 37 f0max Lists
## 38 f0max Lists
# Per-speaker scatter of own f0 max vs the interlocutor's previous f0 max,
# Diapix task only, with a per-panel linear fit.
ggplot(data = filter(dat, task == "Diapix"),
       mapping = aes(x = f0maxzGender, y = prevf0max)) +
  geom_point() +
  geom_smooth(method = "lm") +
  facet_wrap(~ speaker)
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 191 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 191 rows containing missing values (`geom_point()`).
corS <- data.frame(matrix(nrow = 0, ncol = 4))
names(corS) <- c("speaker", "type", "r", "p")
# Per-speaker synchrony in the Diapix task: speaker's f0 max vs the
# interlocutor's previous f0 max ("real"), plus 10 randomly re-ordered
# baselines ("mock").
for (s in unique(dat$speaker)) {
  d <- dat |> filter(speaker == s, task == "Diapix")
  if (nrow(d) == 0) {
    next  # the Diapix recordings are missing for one dyad
  }
  ct <- cor.test(d$f0maxzGender, d$prevf0max,
                 alternative = "two.sided", method = "pearson")
  corS[nrow(corS) + 1, ] <- c(s, "real", ct$estimate, ct$p.value)
  for (i in 1:10) {
    # [[ ]] extracts a vector for both data.frames and tibbles
    ct <- cor.test(d[[paste0("mockMax", i)]], d$f0maxzGender,
                   alternative = "two.sided", method = "pearson")
    corS[nrow(corS) + 1, ] <- c(s, "mock", ct$estimate, ct$p.value)
  }
}
# The row-wise assignment above coerces every column to character, so restore
# r and p to numeric BEFORE comparing. Comparing character against numeric is
# lexicographic (e.g. "1e-05" < "0.05" is FALSE) and mis-flags small p-values.
corS <- corS |>
  mutate(across(c(r, p), as.numeric),
         sign = ifelse(p < 0.05, "*", NA),
         direction = case_when(
           r < 0 ~ "synchrony",
           r > 0 ~ "dyssynchrony"))
count <- data.frame(matrix(nrow = 0, ncol = 6))
names(count) <- c("speaker", "type", "direction", "coefficient", "mock", "real")
# One summary row per speaker: real-correlation direction/coefficient, number
# of significant mock correlations, and whether the real one was significant.
for (s in unique(corS$speaker)) {
  count[nrow(count) + 1, ] <- c(
    s,
    "synchrony",
    corS$direction[corS$speaker == s & corS$type == "real"],
    corS$r[corS$speaker == s & corS$type == "real"],
    sum(!is.na(corS$sign[corS$speaker == s & corS$type == "mock"])),
    ifelse(corS$sign[corS$speaker == s & corS$type == "real"] == "*", "yes", "no"))
}
count <- merge(count,
               dat |> select(condition, speaker) |> filter(!duplicated(speaker)),
               by = "speaker")
# A speaker only counts as truly (dys)synchronizing when the real correlation
# is significant AND at most 1 of the 10 mock correlations is.
count <- count |>
  mutate(mock = as.numeric(mock)) |>
  arrange(mock) |>
  mutate(feature = "f0max",
         section = "Diapix",
         real = case_when(
           mock > 1 | is.na(real) ~ "no",
           .default = as.character(real)
         ))
all <- rbind(all, count)
Below we see the 4 speakers whose correlation was significant between their real f0 max and their interlocutor's previous f0 max AND who had no more than 1 significant correlation with the randomly ordered interlocutor f0 max.
All desynchronized: two in the close and two in the impersonal condition.
print(count)
## speaker type direction coefficient mock real condition
## 1 AML-B synchrony dyssynchrony 0.231186577537179 0 yes impersonal
## 2 FXO-A synchrony dyssynchrony 0.0309467662641052 0 no impersonal
## 3 HAG-B synchrony dyssynchrony 0.0424697881468647 0 no close
## 4 HBR-A synchrony dyssynchrony 0.102811584240309 0 no impersonal
## 5 HUJ-A synchrony dyssynchrony 0.00336018202987396 0 no close
## 6 KDA-B synchrony synchrony -0.0349554159592185 0 no impersonal
## 7 KPB-A synchrony dyssynchrony 0.201144438843265 0 no close
## 8 KPB-B synchrony dyssynchrony 0.00835090643953848 0 no close
## 9 MJG-B synchrony synchrony -0.0680501305177131 0 no close
## 10 NLO-A synchrony dyssynchrony 0.338057672770634 0 yes close
## 11 OAL-A synchrony dyssynchrony 0.123090608957763 0 no impersonal
## 12 OXQ-A synchrony dyssynchrony 0.0345958594057422 0 no impersonal
## 13 OXQ-B synchrony dyssynchrony 0.30774262109949 0 yes impersonal
## 14 QRT-A synchrony dyssynchrony 0.0452485126722081 0 no close
## 15 QRT-B synchrony synchrony -0.117308849293399 0 no close
## 16 SGB-A synchrony dyssynchrony 0.0845694521802394 0 no impersonal
## 17 SGB-B synchrony synchrony -0.0525138912761415 0 no impersonal
## 18 SUK-B synchrony dyssynchrony 0.105829005808465 0 no impersonal
## 19 TTN-A synchrony dyssynchrony 0.237696697413486 0 yes close
## 20 TTY-A synchrony dyssynchrony 0.127492156571264 0 no close
## 21 VDE-A synchrony dyssynchrony 0.0996299439856212 0 no impersonal
## 22 VDE-B synchrony dyssynchrony 0.162886734111321 0 no impersonal
## 23 ZNV-B synchrony dyssynchrony 0.114597517731536 0 no close
## 24 AML-A synchrony synchrony -0.00216358854450929 1 no impersonal
## 25 FXO-B synchrony synchrony -0.0145082806110858 1 no impersonal
## 26 HAG-A synchrony synchrony -0.110860198592555 1 no close
## 27 HBR-B synchrony synchrony -0.016550700900653 1 no impersonal
## 28 HUJ-B synchrony dyssynchrony 0.159576793779983 1 no close
## 29 KDA-A synchrony synchrony -0.12543875849989 1 no impersonal
## 30 MJG-A synchrony dyssynchrony 0.0436207296924935 1 no close
## 31 OAL-B synchrony dyssynchrony 0.0534729193217042 1 no impersonal
## 32 TTN-B synchrony dyssynchrony 0.105651994592981 1 no close
## 33 TTY-B synchrony dyssynchrony 0.185729023598761 1 no close
## 34 ZNV-A synchrony dyssynchrony 0.122262081306364 1 no close
## 35 NLO-B synchrony dyssynchrony 0.167318687271417 2 no close
## 36 SUK-A synchrony dyssynchrony 0.109444786824869 2 no impersonal
## feature section
## 1 f0max Diapix
## 2 f0max Diapix
## 3 f0max Diapix
## 4 f0max Diapix
## 5 f0max Diapix
## 6 f0max Diapix
## 7 f0max Diapix
## 8 f0max Diapix
## 9 f0max Diapix
## 10 f0max Diapix
## 11 f0max Diapix
## 12 f0max Diapix
## 13 f0max Diapix
## 14 f0max Diapix
## 15 f0max Diapix
## 16 f0max Diapix
## 17 f0max Diapix
## 18 f0max Diapix
## 19 f0max Diapix
## 20 f0max Diapix
## 21 f0max Diapix
## 22 f0max Diapix
## 23 f0max Diapix
## 24 f0max Diapix
## 25 f0max Diapix
## 26 f0max Diapix
## 27 f0max Diapix
## 28 f0max Diapix
## 29 f0max Diapix
## 30 f0max Diapix
## 31 f0max Diapix
## 32 f0max Diapix
## 33 f0max Diapix
## 34 f0max Diapix
## 35 f0max Diapix
## 36 f0max Diapix
# Each speaker's f0 SD (gender z-score) against the partner's f0 SD in the
# previous IPU, with a per-speaker linear fit (entire experiment).
ggplot(dat, aes(x = f0sdzGender, y = prevf0sd)) +
  geom_point() +
  geom_smooth(method = "lm") +
  facet_wrap(~ speaker)
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 575 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 575 rows containing missing values (`geom_point()`).
# For each speaker, correlate their f0 SD (z-scored within gender) with the
# partner's f0 SD in the previous IPU ("real"), plus 10 correlations against
# randomly re-ordered partner values ("mock") as a robustness baseline.
corS <- data.frame(matrix(nrow=0, ncol=4))
names(corS) <- c("speaker", "type", "r", "p")
for(s in unique(dat$speaker)){
  d <- dat |> filter(speaker==s)
  # `ct` rather than `c`, so the base c() used below is not shadowed.
  ct <- cor.test(d$f0sdzGender, d$prevf0sd,
                 alternative="two.sided", method="pearson")
  corS[nrow(corS)+1,] <- c(s, "real", ct$estimate, ct$p.value)
  for(i in 1:10){
    # d[[col]] always returns a vector (d[, col] would be a tibble if dat is one)
    ct <- cor.test(d[[paste0("mockSd", i)]], d$f0sdzGender,
                   alternative="two.sided", method="pearson")
    corS[nrow(corS)+1,] <- c(s, "mock", ct$estimate, ct$p.value)
  }
}
# r and p were coerced to character by the row-wise c() assignment above, so
# convert them back to numeric first: comparing the character column against
# 0.05 compares lexicographically, which misclassifies p-values rendered in
# scientific notation (e.g. "1e-07" < 0.05 is FALSE as strings).
corS <- corS |>
  mutate(r = as.numeric(r), p = as.numeric(p)) |>
  mutate(sign = ifelse(p < 0.05, "*", NA),
         direction = case_when(
           r < 0 ~ "synchrony",       # negative r: partner values track together
           r > 0 ~ "dyssynchrony"))   # positive r: values move apart
# One summary row per speaker: direction and size of the real correlation,
# how many of the 10 mock correlations reached significance, and whether the
# real correlation did.
count <- data.frame(matrix(nrow=0, ncol=6))
names(count) <- c("speaker", "type", "direction", "coefficient", "mock", "real")
for(s in unique(corS$speaker)){
  real_i <- corS$speaker==s & corS$type=="real"
  mock_i <- corS$speaker==s & corS$type=="mock"
  count[nrow(count)+1,] <- c(s,
                             "synchrony",
                             corS$direction[real_i],
                             corS$r[real_i],
                             # number of significant mock correlations (sign is "*" or NA)
                             sum(!is.na(corS$sign[mock_i])),
                             # NA when the real correlation was not significant;
                             # handled by the case_when() below
                             ifelse(corS$sign[real_i]=="*", "yes", "no"))
}
# Attach the speaker's condition, make the mock count numeric, label this
# analysis block, and apply the robustness criterion: `real` stays "yes" only
# when the real correlation was significant AND at most 1 of the 10 mock
# correlations was.
count <- merge(count,
               dat |> select(condition, speaker) |> filter(!duplicated(speaker)),
               by = "speaker")
count <- count |>
  mutate(mock = as.numeric(mock)) |>   # mutate_at() is superseded
  arrange(mock) |>
  mutate(feature = "f0sd",
         section = "entireExp",
         real = case_when(
           mock > 1 | is.na(real) ~ "no",
           .default = as.character(real)
         ))
all <- rbind(all, count)
Below we see the 4 speakers whose correlation between their f0 SD and their partner's f0 SD in the previous IPU was significant AND who had no more than 1 significant correlation with the randomly ordered f0 SD.
All four desynchronized – two in the close and two in the impersonal condition.
# Per-speaker summary for the entire experiment, sorted by mock count.
print(count)
## speaker type direction coefficient mock real condition
## 1 AML-A synchrony dyssynchrony 0.153778116855479 0 no impersonal
## 2 FWR-B synchrony synchrony -0.0324447261389338 0 no close
## 3 HAG-A synchrony synchrony -0.143766052888694 0 no close
## 4 HAG-B synchrony dyssynchrony 0.101313108390594 0 no close
## 5 KPB-B synchrony dyssynchrony 0.0541386350246041 0 no close
## 6 MJG-B synchrony dyssynchrony 0.0669785567362788 0 no close
## 7 NLO-A synchrony dyssynchrony 0.233737973133155 0 yes close
## 8 OAL-A synchrony dyssynchrony 0.140931898052414 0 no impersonal
## 9 OXQ-A synchrony synchrony -0.064906034559461 0 no impersonal
## 10 OXQ-B synchrony dyssynchrony 0.0927476551496673 0 no impersonal
## 11 QRT-A synchrony dyssynchrony 0.0513261627179744 0 no close
## 12 QRT-B synchrony synchrony -0.0766753934850746 0 no close
## 13 SGB-A synchrony dyssynchrony 0.0973015422340224 0 no impersonal
## 14 SGB-B synchrony dyssynchrony 0.0459369140109435 0 no impersonal
## 15 SUK-B synchrony dyssynchrony 0.065794034576589 0 no impersonal
## 16 TTN-A synchrony dyssynchrony 0.237874092353268 0 yes close
## 17 TTY-A synchrony dyssynchrony 0.124730222992609 0 no close
## 18 ZNV-A synchrony synchrony -0.061237698977345 0 no close
## 19 AML-B synchrony dyssynchrony 0.201950622475474 1 yes impersonal
## 20 FXO-A synchrony dyssynchrony 0.15031168921442 1 no impersonal
## 21 FXO-B synchrony dyssynchrony 0.156992144420927 1 no impersonal
## 22 HBR-A synchrony dyssynchrony 0.128832131269147 1 no impersonal
## 23 HUJ-A synchrony dyssynchrony 0.0638984110581506 1 no close
## 24 HUJ-B synchrony dyssynchrony 0.0469235220894952 1 no close
## 25 KDA-A synchrony synchrony -0.0178123859239933 1 no impersonal
## 26 KDA-B synchrony synchrony -0.115054447034382 1 no impersonal
## 27 KPB-A synchrony dyssynchrony 0.0300078944661093 1 no close
## 28 NLO-B synchrony synchrony -0.0244629622292923 1 no close
## 29 OAL-B synchrony dyssynchrony 0.141088237854238 1 no impersonal
## 30 SUK-A synchrony dyssynchrony 0.127197201703565 1 no impersonal
## 31 TTN-B synchrony dyssynchrony 0.0524129574686659 1 no close
## 32 TTY-B synchrony synchrony -0.145003218983133 1 no close
## 33 VDE-A synchrony dyssynchrony 0.23120492741628 1 yes impersonal
## 34 ZNV-B synchrony dyssynchrony 0.0683674782309375 1 no close
## 35 FWR-A synchrony dyssynchrony 0.0675572805781528 2 no close
## 36 HBR-B synchrony dyssynchrony 0.0934297243675509 2 no impersonal
## 37 MJG-A synchrony dyssynchrony 0.0251522232559881 2 no close
## 38 VDE-B synchrony dyssynchrony 0.152908863291665 4 no impersonal
## feature section
## 1 f0sd entireExp
## 2 f0sd entireExp
## 3 f0sd entireExp
## 4 f0sd entireExp
## 5 f0sd entireExp
## 6 f0sd entireExp
## 7 f0sd entireExp
## 8 f0sd entireExp
## 9 f0sd entireExp
## 10 f0sd entireExp
## 11 f0sd entireExp
## 12 f0sd entireExp
## 13 f0sd entireExp
## 14 f0sd entireExp
## 15 f0sd entireExp
## 16 f0sd entireExp
## 17 f0sd entireExp
## 18 f0sd entireExp
## 19 f0sd entireExp
## 20 f0sd entireExp
## 21 f0sd entireExp
## 22 f0sd entireExp
## 23 f0sd entireExp
## 24 f0sd entireExp
## 25 f0sd entireExp
## 26 f0sd entireExp
## 27 f0sd entireExp
## 28 f0sd entireExp
## 29 f0sd entireExp
## 30 f0sd entireExp
## 31 f0sd entireExp
## 32 f0sd entireExp
## 33 f0sd entireExp
## 34 f0sd entireExp
## 35 f0sd entireExp
## 36 f0sd entireExp
## 37 f0sd entireExp
## 38 f0sd entireExp
# Same scatter restricted to the Lists section of the experiment.
ggplot(dat |> filter(task=="Lists"), aes(x = f0sdzGender, y = prevf0sd)) +
  geom_point() +
  geom_smooth(method = "lm") +
  facet_wrap(~ speaker)
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 152 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 152 rows containing missing values (`geom_point()`).
# Same per-speaker real/mock correlations, restricted to the Lists section.
corS <- data.frame(matrix(nrow=0, ncol=4))
names(corS) <- c("speaker", "type", "r", "p")
for(s in unique(dat$speaker)){
  d <- dat |> filter(speaker==s, task=="Lists")
  # `ct` rather than `c`, so the base c() used below is not shadowed.
  ct <- cor.test(d$f0sdzGender, d$prevf0sd,
                 alternative="two.sided", method="pearson")
  corS[nrow(corS)+1,] <- c(s, "real", ct$estimate, ct$p.value)
  for(i in 1:10){
    ct <- cor.test(d[[paste0("mockSd", i)]], d$f0sdzGender,
                   alternative="two.sided", method="pearson")
    corS[nrow(corS)+1,] <- c(s, "mock", ct$estimate, ct$p.value)
  }
}
# r and p were coerced to character by the row-wise c() assignment above, so
# convert them back to numeric first: comparing the character column against
# 0.05 compares lexicographically, which misclassifies p-values rendered in
# scientific notation (e.g. "1e-07" < 0.05 is FALSE as strings).
corS <- corS |>
  mutate(r = as.numeric(r), p = as.numeric(p)) |>
  mutate(sign = ifelse(p < 0.05, "*", NA),
         direction = case_when(
           r < 0 ~ "synchrony",
           r > 0 ~ "dyssynchrony"))
# One summary row per speaker (Lists section): real-correlation direction and
# size, number of significant mock correlations, significance of the real one.
count <- data.frame(matrix(nrow=0, ncol=6))
names(count) <- c("speaker", "type", "direction", "coefficient", "mock", "real")
for(s in unique(corS$speaker)){
  real_i <- corS$speaker==s & corS$type=="real"
  mock_i <- corS$speaker==s & corS$type=="mock"
  count[nrow(count)+1,] <- c(s,
                             "synchrony",
                             corS$direction[real_i],
                             corS$r[real_i],
                             # number of significant mock correlations
                             sum(!is.na(corS$sign[mock_i])),
                             ifelse(corS$sign[real_i]=="*", "yes", "no"))
}
# Attach condition, make mock counts numeric, label the block, and apply the
# robustness criterion (significant real correlation, at most 1 significant mock).
count <- merge(count,
               dat |> select(condition, speaker) |> filter(!duplicated(speaker)),
               by = "speaker")
count <- count |>
  mutate(mock = as.numeric(mock)) |>   # mutate_at() is superseded
  arrange(mock) |>
  mutate(feature = "f0sd",
         section = "Lists",
         real = case_when(
           mock > 1 | is.na(real) ~ "no",
           .default = as.character(real)
         ))
all <- rbind(all, count)
Below we see the 3 speakers whose correlation between their f0 SD and their partner's f0 SD in the previous IPU was significant AND who had no more than 1 significant correlation with the randomly ordered f0 SD.
Two desynchronized (one close and one impersonal condition) and one synchronized (close condition).
# Per-speaker summary for the Lists section, sorted by mock count.
print(count)
## speaker type direction coefficient mock real condition
## 1 AML-A synchrony dyssynchrony 0.0611655649472113 0 no impersonal
## 2 AML-B synchrony dyssynchrony 0.30879429857356 0 yes impersonal
## 3 FWR-B synchrony synchrony -0.0324447261389338 0 no close
## 4 FXO-B synchrony dyssynchrony 0.144170396842999 0 no impersonal
## 5 HAG-B synchrony synchrony -0.0881146231300642 0 no close
## 6 HUJ-A synchrony synchrony -0.120845396418935 0 no close
## 7 HUJ-B synchrony synchrony -0.00122613312587873 0 no close
## 8 KPB-A synchrony dyssynchrony 0.216017865393854 0 no close
## 9 KPB-B synchrony dyssynchrony 0.00547995821039961 0 no close
## 10 MJG-A synchrony synchrony -0.199425800143578 0 no close
## 11 NLO-B synchrony dyssynchrony 0.348000337617956 0 no close
## 12 OXQ-B synchrony dyssynchrony 0.00823109981076577 0 no impersonal
## 13 SGB-B synchrony dyssynchrony 0.00640269094968847 0 no impersonal
## 14 SUK-A synchrony synchrony -0.200323911932563 0 no impersonal
## 15 SUK-B synchrony dyssynchrony 0.25552836130833 0 no impersonal
## 16 TTN-A synchrony dyssynchrony 0.135266323710738 0 no close
## 17 TTY-A synchrony dyssynchrony 0.424723946652559 0 yes close
## 18 TTY-B synchrony synchrony -0.0831448789691296 0 no close
## 19 FXO-A synchrony synchrony -0.075791288630783 1 no impersonal
## 20 HAG-A synchrony synchrony -0.404067610268608 1 yes close
## 21 HBR-A synchrony dyssynchrony 0.00766945422939426 1 no impersonal
## 22 HBR-B synchrony synchrony -0.222562021258751 1 no impersonal
## 23 MJG-B synchrony dyssynchrony 0.241740773762611 1 no close
## 24 NLO-A synchrony synchrony -0.106304536899728 1 no close
## 25 OAL-A synchrony dyssynchrony 0.0295696473878664 1 no impersonal
## 26 OAL-B synchrony synchrony -0.000262610315730354 1 no impersonal
## 27 QRT-A synchrony synchrony -0.164210783239977 1 no close
## 28 QRT-B synchrony dyssynchrony 0.253569805339333 1 no close
## 29 SGB-A synchrony synchrony -0.0278966219632956 1 no impersonal
## 30 TTN-B synchrony dyssynchrony 0.187019438332667 1 no close
## 31 VDE-B synchrony dyssynchrony 0.0728138821854779 1 no impersonal
## 32 ZNV-A synchrony dyssynchrony 0.09751919262334 1 no close
## 33 ZNV-B synchrony synchrony -0.254309421919819 1 no close
## 34 FWR-A synchrony dyssynchrony 0.0675572805781528 2 no close
## 35 KDA-A synchrony dyssynchrony 0.46078306524803 2 no impersonal
## 36 KDA-B synchrony dyssynchrony 0.323983027691163 2 no impersonal
## 37 OXQ-A synchrony synchrony -0.163513678774864 2 no impersonal
## 38 VDE-A synchrony dyssynchrony 0.103449969897939 2 no impersonal
## feature section
## 1 f0sd Lists
## 2 f0sd Lists
## 3 f0sd Lists
## 4 f0sd Lists
## 5 f0sd Lists
## 6 f0sd Lists
## 7 f0sd Lists
## 8 f0sd Lists
## 9 f0sd Lists
## 10 f0sd Lists
## 11 f0sd Lists
## 12 f0sd Lists
## 13 f0sd Lists
## 14 f0sd Lists
## 15 f0sd Lists
## 16 f0sd Lists
## 17 f0sd Lists
## 18 f0sd Lists
## 19 f0sd Lists
## 20 f0sd Lists
## 21 f0sd Lists
## 22 f0sd Lists
## 23 f0sd Lists
## 24 f0sd Lists
## 25 f0sd Lists
## 26 f0sd Lists
## 27 f0sd Lists
## 28 f0sd Lists
## 29 f0sd Lists
## 30 f0sd Lists
## 31 f0sd Lists
## 32 f0sd Lists
## 33 f0sd Lists
## 34 f0sd Lists
## 35 f0sd Lists
## 36 f0sd Lists
## 37 f0sd Lists
## 38 f0sd Lists
# Same scatter restricted to the Diapix section of the experiment.
ggplot(dat |> filter(task=="Diapix"), aes(x = f0sdzGender, y = prevf0sd)) +
  geom_point() +
  geom_smooth(method = "lm") +
  facet_wrap(~ speaker)
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 423 rows containing non-finite values (`stat_smooth()`).
## Warning: Removed 423 rows containing missing values (`geom_point()`).
# Same per-speaker real/mock correlations, restricted to the Diapix section.
corS <- data.frame(matrix(nrow=0, ncol=4))
names(corS) <- c("speaker", "type", "r", "p")
for(s in unique(dat$speaker)){
  d <- dat |> filter(speaker==s, task=="Diapix")
  # Skip speakers without Diapix data (files missing for one dyad).
  if(nrow(d) == 0){next}
  # `ct` rather than `c`, so the base c() used below is not shadowed.
  ct <- cor.test(d$f0sdzGender, d$prevf0sd,
                 alternative="two.sided", method="pearson")
  corS[nrow(corS)+1,] <- c(s, "real", ct$estimate, ct$p.value)
  for(i in 1:10){
    ct <- cor.test(d[[paste0("mockSd", i)]], d$f0sdzGender,
                   alternative="two.sided", method="pearson")
    corS[nrow(corS)+1,] <- c(s, "mock", ct$estimate, ct$p.value)
  }
}
# r and p were coerced to character by the row-wise c() assignment above, so
# convert them back to numeric first: comparing the character column against
# 0.05 compares lexicographically, which misclassifies p-values rendered in
# scientific notation (e.g. "1e-07" < 0.05 is FALSE as strings).
corS <- corS |>
  mutate(r = as.numeric(r), p = as.numeric(p)) |>
  mutate(sign = ifelse(p < 0.05, "*", NA),
         direction = case_when(
           r < 0 ~ "synchrony",
           r > 0 ~ "dyssynchrony"))
# One summary row per speaker (Diapix section): real-correlation direction and
# size, number of significant mock correlations, significance of the real one.
count <- data.frame(matrix(nrow=0, ncol=6))
names(count) <- c("speaker", "type", "direction", "coefficient", "mock", "real")
for(s in unique(corS$speaker)){
  real_i <- corS$speaker==s & corS$type=="real"
  mock_i <- corS$speaker==s & corS$type=="mock"
  count[nrow(count)+1,] <- c(s,
                             "synchrony",
                             corS$direction[real_i],
                             corS$r[real_i],
                             # number of significant mock correlations
                             sum(!is.na(corS$sign[mock_i])),
                             ifelse(corS$sign[real_i]=="*", "yes", "no"))
}
# Attach condition, make mock counts numeric, label the block, and apply the
# robustness criterion (significant real correlation, at most 1 significant mock).
count <- merge(count,
               dat |> select(condition, speaker) |> filter(!duplicated(speaker)),
               by = "speaker")
count <- count |>
  mutate(mock = as.numeric(mock)) |>   # mutate_at() is superseded
  arrange(mock) |>
  mutate(feature = "f0sd",
         section = "Diapix",
         real = case_when(
           mock > 1 | is.na(real) ~ "no",
           .default = as.character(real)
         ))
all <- rbind(all, count)
Below we see the 2 speakers whose correlation between their f0 SD and their partner's f0 SD in the previous IPU was significant AND who had no more than 1 significant correlation with the randomly ordered f0 SD.
Both desynchronized and in the close condition.
# Per-speaker summary for the Diapix section, sorted by mock count.
print(count)
## speaker type direction coefficient mock real condition
## 1 AML-A synchrony dyssynchrony 0.213149040230424 0 no impersonal
## 2 FXO-A synchrony dyssynchrony 0.116267088295808 0 no impersonal
## 3 HAG-A synchrony synchrony -0.0258574451716526 0 no close
## 4 HUJ-B synchrony dyssynchrony 0.0694753771190249 0 no close
## 5 KDA-B synchrony synchrony -0.173814635133023 0 no impersonal
## 6 KPB-B synchrony dyssynchrony 0.0429610119228431 0 no close
## 7 NLO-A synchrony dyssynchrony 0.274685348789403 0 yes close
## 8 OXQ-B synchrony dyssynchrony 0.125944355427305 0 no impersonal
## 9 QRT-A synchrony dyssynchrony 0.0685951582052735 0 no close
## 10 QRT-B synchrony synchrony -0.103871471357015 0 no close
## 11 SUK-A synchrony dyssynchrony 0.122742433682649 0 no impersonal
## 12 TTN-A synchrony dyssynchrony 0.269606087931864 0 yes close
## 13 TTN-B synchrony dyssynchrony 0.0148518818339105 0 no close
## 14 VDE-A synchrony dyssynchrony 0.125461158248949 0 no impersonal
## 15 VDE-B synchrony dyssynchrony 0.0421230600989299 0 no impersonal
## 16 ZNV-A synchrony dyssynchrony 0.0164402610084308 0 no close
## 17 AML-B synchrony dyssynchrony 0.159665173881711 1 no impersonal
## 18 FXO-B synchrony dyssynchrony 0.0539337834963957 1 no impersonal
## 19 HAG-B synchrony dyssynchrony 0.144160714665811 1 no close
## 20 HBR-A synchrony dyssynchrony 0.18017574059799 1 no impersonal
## 21 HUJ-A synchrony dyssynchrony 0.0357576172231783 1 no close
## 22 KDA-A synchrony synchrony -0.04732298787283 1 no impersonal
## 23 KPB-A synchrony dyssynchrony 0.0419557813493141 1 no close
## 24 MJG-A synchrony dyssynchrony 0.0925991818141589 1 no close
## 25 MJG-B synchrony dyssynchrony 0.0395987436268099 1 no close
## 26 NLO-B synchrony synchrony -0.0566893964030592 1 no close
## 27 OAL-A synchrony dyssynchrony 0.0849493721184279 1 no impersonal
## 28 OAL-B synchrony dyssynchrony 0.1556678036417 1 no impersonal
## 29 OXQ-A synchrony synchrony -0.0262836588313215 1 no impersonal
## 30 SGB-A synchrony dyssynchrony 0.0936196772572417 1 no impersonal
## 31 SGB-B synchrony dyssynchrony 0.0518580350034459 1 no impersonal
## 32 SUK-B synchrony synchrony -0.0115946599908498 1 no impersonal
## 33 TTY-A synchrony dyssynchrony 0.0697079625476732 1 no close
## 34 HBR-B synchrony dyssynchrony 0.224525402973737 2 no impersonal
## 35 TTY-B synchrony synchrony -0.0835069709913098 2 no close
## 36 ZNV-B synchrony dyssynchrony 0.169712359137907 2 no close
## feature section
## 1 f0sd Diapix
## 2 f0sd Diapix
## 3 f0sd Diapix
## 4 f0sd Diapix
## 5 f0sd Diapix
## 6 f0sd Diapix
## 7 f0sd Diapix
## 8 f0sd Diapix
## 9 f0sd Diapix
## 10 f0sd Diapix
## 11 f0sd Diapix
## 12 f0sd Diapix
## 13 f0sd Diapix
## 14 f0sd Diapix
## 15 f0sd Diapix
## 16 f0sd Diapix
## 17 f0sd Diapix
## 18 f0sd Diapix
## 19 f0sd Diapix
## 20 f0sd Diapix
## 21 f0sd Diapix
## 22 f0sd Diapix
## 23 f0sd Diapix
## 24 f0sd Diapix
## 25 f0sd Diapix
## 26 f0sd Diapix
## 27 f0sd Diapix
## 28 f0sd Diapix
## 29 f0sd Diapix
## 30 f0sd Diapix
## 31 f0sd Diapix
## 32 f0sd Diapix
## 33 f0sd Diapix
## 34 f0sd Diapix
## 35 f0sd Diapix
## 36 f0sd Diapix
# Rename `real` to the clearer `entrain` and order the rows by experiment
# section (entireExp, Lists, Diapix).
all <- all |>
  rename(entrain = real) |>
  arrange(match(section, c("entireExp", "Lists", "Diapix")))
# All speaker/feature/section combinations with a robust entrainment effect:
print(all |> filter(entrain == "yes"))
## speaker type direction coefficient mock entrain condition
## 1 OXQ-B convergence divergence 0.166665816388928 0 yes impersonal
## 2 HBR-B convergence divergence 0.189733700626366 1 yes impersonal
## 3 OAL-A convergence convergence -0.298741868255242 0 yes impersonal
## 4 ZNV-B convergence convergence -0.201336693011536 0 yes close
## 5 HUJ-A synchrony dyssynchrony 0.201242839663773 0 yes close
## 6 SUK-A synchrony dyssynchrony 0.159852903688834 0 yes impersonal
## 7 NLO-A synchrony dyssynchrony 0.255974952041841 0 yes close
## 8 OAL-A synchrony dyssynchrony 0.221544511147051 0 yes impersonal
## 9 OXQ-B synchrony dyssynchrony 0.175236749050539 0 yes impersonal
## 10 TTY-A synchrony dyssynchrony 0.202849083353503 0 yes close
## 11 AML-B synchrony dyssynchrony 0.25550299231443 1 yes impersonal
## 12 TTN-A synchrony dyssynchrony 0.182537194609077 1 yes close
## 13 VDE-B synchrony dyssynchrony 0.182133613328821 1 yes impersonal
## 14 NLO-A synchrony dyssynchrony 0.233737973133155 0 yes close
## 15 TTN-A synchrony dyssynchrony 0.237874092353268 0 yes close
## 16 AML-B synchrony dyssynchrony 0.201950622475474 1 yes impersonal
## 17 VDE-A synchrony dyssynchrony 0.23120492741628 1 yes impersonal
## 18 NLO-B convergence convergence -0.443954182939341 0 yes close
## 19 VDE-B convergence convergence -0.276028480202935 0 yes impersonal
## 20 VDE-A convergence convergence -0.31549012844638 0 yes impersonal
## 21 SGB-B convergence divergence 0.438803836732088 1 yes impersonal
## 22 HUJ-A synchrony dyssynchrony 0.357298884252694 0 yes close
## 23 QRT-A synchrony synchrony -0.390678503584988 1 yes close
## 24 VDE-A synchrony dyssynchrony 0.33594454744154 0 yes impersonal
## 25 AML-B synchrony dyssynchrony 0.30879429857356 0 yes impersonal
## 26 TTY-A synchrony dyssynchrony 0.424723946652559 0 yes close
## 27 HAG-A synchrony synchrony -0.404067610268608 1 yes close
## 28 TTY-B convergence convergence -0.220872291596236 0 yes close
## 29 OAL-A convergence convergence -0.324655159586839 0 yes impersonal
## 30 QRT-B convergence divergence 0.241788295665581 1 yes close
## 31 SUK-A synchrony dyssynchrony 0.183856357146092 0 yes impersonal
## 32 SUK-B synchrony dyssynchrony 0.21883773693464 0 yes impersonal
## 33 ZNV-A synchrony dyssynchrony 0.342883979389005 0 yes close
## 34 AML-B synchrony dyssynchrony 0.231186577537179 0 yes impersonal
## 35 NLO-A synchrony dyssynchrony 0.338057672770634 0 yes close
## 36 OXQ-B synchrony dyssynchrony 0.30774262109949 0 yes impersonal
## 37 TTN-A synchrony dyssynchrony 0.237696697413486 0 yes close
## 38 NLO-A synchrony dyssynchrony 0.274685348789403 0 yes close
## 39 TTN-A synchrony dyssynchrony 0.269606087931864 0 yes close
## feature section
## 1 f0median entireExp
## 2 f0median entireExp
## 3 f0sd entireExp
## 4 f0sd entireExp
## 5 f0median entireExp
## 6 f0median entireExp
## 7 f0max entireExp
## 8 f0max entireExp
## 9 f0max entireExp
## 10 f0max entireExp
## 11 f0max entireExp
## 12 f0max entireExp
## 13 f0max entireExp
## 14 f0sd entireExp
## 15 f0sd entireExp
## 16 f0sd entireExp
## 17 f0sd entireExp
## 18 f0max Lists
## 19 f0max Lists
## 20 f0sd Lists
## 21 f0sd Lists
## 22 f0median Lists
## 23 f0median Lists
## 24 f0max Lists
## 25 f0sd Lists
## 26 f0sd Lists
## 27 f0sd Lists
## 28 f0median Diapix
## 29 f0sd Diapix
## 30 f0sd Diapix
## 31 f0median Diapix
## 32 f0median Diapix
## 33 f0median Diapix
## 34 f0max Diapix
## 35 f0max Diapix
## 36 f0max Diapix
## 37 f0max Diapix
## 38 f0sd Diapix
## 39 f0sd Diapix
Most effects were found when we consider the entire experiment, but a considerable number of effects appeared in each section too. (Note: the higher number of effects across the whole experiment should not be due to the increased number of datapoints, since we do a robustness check with 10 randomly ordered mock datasets to control for the number of points.)
# Number of robust entrainment effects per experiment section.
table(all$section[all$entrain=="yes"])
##
## Diapix entireExp Lists
## 12 17 10
# Robust effects broken down by section and analysis type (convergence vs. synchrony).
table(paste(all$section[all$entrain=="yes"], all$type[all$entrain=="yes"]))
##
## Diapix convergence Diapix synchrony entireExp convergence
## 3 9 4
## entireExp synchrony Lists convergence Lists synchrony
## 13 4 6
There is a similar number of divergence and convergence effects, but almost no synchrony and a lot of dyssynchrony.
# Direction of the robust effects across both analysis types.
table(all$direction[all$entrain=="yes"])
##
## convergence divergence dyssynchrony synchrony
## 7 4 26 2
There was roughly the same number of effects on each feature.
# Robust effects per f0 feature.
table(all$feature[all$entrain=="yes"])
##
## f0max f0median f0sd
## 14 10 15
# Cross-tabulate feature by direction for the robust convergence/divergence/
# synchrony effects. Bug fix: the original subset the two columns with
# different masks (lengths 39 vs. 13), so paste() silently recycled the
# shorter vector and paired features with directions taken from other rows.
# Subset the rows once so the two vectors stay aligned.
eff <- all[all$entrain=="yes" & all$direction!="dyssynchrony",]
table(paste(eff$feature, eff$direction))
##
## f0max convergence f0max divergence f0max synchrony
## 7 3 4
## f0median convergence f0median divergence f0median synchrony
## 5 3 2
## f0sd convergence f0sd divergence
## 9 6
When a given speaker shows one effect, they are more likely to also show other effects.
# Speakers ordered by how many robust effects they show.
sort(table(all$speaker[all$entrain=="yes"]))
##
## HAG-A HBR-B NLO-B QRT-A QRT-B SGB-B SUK-B TTY-B ZNV-A ZNV-B HUJ-A SUK-A TTY-A
## 1 1 1 1 1 1 1 1 1 1 2 2 2
## VDE-B OAL-A OXQ-B VDE-A AML-B NLO-A TTN-A
## 2 3 3 3 4 4 4
# Distribution of the number of robust effects per speaker.
hist(table(all$speaker[all$entrain=="yes"]))
There doesn’t seem to be a relationship between significant temperature change and a participant’s tendency to converge toward or diverge from their partner. (Looking at the data set itself, there also doesn’t seem to be any pattern.)
# Load the per-speaker facial temperature effects (provides `indTemp`).
load(paste0(here::here(), "/data/individualTemp.RData"))
# Reshape to one row per speaker with one column per ROI, separately for each
# experiment section.
# NOTE(review): group_by(speaker) before pivot_wider() looks redundant here,
# since id_cols=speaker already determines the rows — confirm and simplify.
t1 <- indTemp |>
filter(section=="entireExp") |>
group_by(speaker) |>
pivot_wider(names_from = ROI, values_from = effect, id_cols=speaker) |>
mutate(section = "entireExp")
t2 <- indTemp |>
filter(section=="Lists") |>
group_by(speaker) |>
pivot_wider(names_from = ROI, values_from = effect, id_cols=speaker) |>
mutate(section = "Lists")
t3 <- indTemp |>
filter(section=="Diapix") |>
group_by(speaker) |>
pivot_wider(names_from = ROI, values_from = effect, id_cols=speaker) |>
mutate(section = "Diapix")
# mutate(section = "Lists") # naming the Diapix section and "Lists" to pair these values with the f0 adaptation during "Lists", since temperature change takes a while to happen
# this also didn't bring about any new insights
# Stack all three sections. NOTE(review): `t` shadows base::t() from here on —
# a more specific name (e.g. temp_wide) would be safer.
t <- rbind(t1, rbind(t2, t3))
# t <- rbind(t1, t3)
# Pair the per-speaker temperature effects with the f0 convergence results;
# all=TRUE keeps speaker/section pairs present in only one of the two tables.
dat <- merge(t,
             all |>
               filter(type == "convergence") |>
               select(speaker, direction, entrain, condition, feature, section),
             by = c("speaker", "section"), all = TRUE) |>
  mutate(conv = case_when(
    entrain == "no" ~ "ns",
    entrain == "yes" ~ direction   # "convergence" or "divergence"
  )) |>
  mutate(across(everything(), as.factor))  # mutate_all() is superseded
# Use "ns" (not significant) as the reference level for the multinomial models.
dat$Forehead <- relevel(dat$Forehead, ref="ns")
dat$Cheeks <- relevel(dat$Cheeks, ref="ns")
dat$Eyes <- relevel(dat$Eyes, ref="ns")
dat$Nose <- relevel(dat$Nose, ref="ns")
dat$conv <- relevel(dat$conv, ref="ns")
# Forehead temperature change vs. convergence status.
ggplot(dat |> drop_na(), aes(x = Forehead, fill = conv)) +
  geom_bar() +
  ggtitle("Forehead") +
  scale_fill_manual(values = c(divergence = "lightblue", convergence = "red", ns = "gray"))
# Multinomial model: forehead temperature direction predicted by convergence status.
tidy(m <- nnet::multinom(Forehead ~ conv, data = dat)) |>
  as_tibble() |>
  # plogis() maps log-odds onto (0, 1); note that for non-intercept terms this
  # is the inverse logit of a log odds ratio, not a raw probability
  mutate(estimate = plogis(estimate))
## # weights: 12 (6 variable)
## initial value 355.950382
## iter 10 value 248.441879
## iter 20 value 248.367432
## final value 248.362748
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.0732 0.346 -7.33 2.25e-13
## 2 decrease convconvergence 0.000327 88.0 -0.0912 9.27e- 1
## 3 decrease convdivergence 0.0000939 367. -0.0253 9.80e- 1
## 4 increase (Intercept) 0.625 0.118 4.31 1.61e- 5
## 5 increase convconvergence 0.194 0.845 -1.69 9.15e- 2
## 6 increase convdivergence 0.643 1.16 0.505 6.13e- 1
# Cheek temperature change vs. convergence status.
ggplot(dat |> drop_na(), aes(x = Cheeks, fill = conv)) +
  geom_bar() +
  ggtitle("Cheeks") +
  scale_fill_manual(values = c(divergence = "lightblue", convergence = "red", ns = "gray"))
# Multinomial model: cheek temperature direction predicted by convergence status.
tidy(m <- nnet::multinom(Cheeks ~ conv, data = dat)) |>
  as_tibble() |>
  # plogis() maps log-odds onto (0, 1); note that for non-intercept terms this
  # is the inverse logit of a log odds ratio, not a raw probability
  mutate(estimate = plogis(estimate))
## # weights: 12 (6 variable)
## initial value 355.950382
## iter 10 value 324.150740
## final value 324.133640
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.271 0.149 -6.66 2.70e-11
## 2 decrease convconvergence 0.000629 29.3 -0.252 8.01e- 1
## 3 decrease convdivergence 0.573 1.23 0.238 8.12e- 1
## 4 increase (Intercept) 0.335 0.134 -5.14 2.78e- 7
## 5 increase convconvergence 0.444 0.847 -0.267 7.89e- 1
## 6 increase convdivergence 0.498 1.23 -0.00801 9.94e- 1
# Eye-region temperature change vs. convergence status.
ggplot(dat |> drop_na(), aes(x = Eyes, fill = conv)) +
  geom_bar() +
  ggtitle("Eyes") +
  scale_fill_manual(values = c(divergence = "lightblue", convergence = "red", ns = "gray"))
# Multinomial model: eye-region temperature direction predicted by convergence status.
tidy(m <- nnet::multinom(Eyes ~ conv, data = dat)) |>
  as_tibble() |>
  # plogis() maps log-odds onto (0, 1); note that for non-intercept terms this
  # is the inverse logit of a log odds ratio, not a raw probability
  mutate(estimate = plogis(estimate))
## # weights: 12 (6 variable)
## initial value 299.921155
## iter 10 value 206.435788
## iter 20 value 206.339052
## final value 206.332662
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.0792 0.368 -6.66 2.76e-11
## 2 decrease convconvergence 1.00 65.9 0.164 8.69e- 1
## 3 decrease convdivergence 0.357 842. -0.000698 9.99e- 1
## 4 increase (Intercept) 0.638 0.130 4.37 1.26e- 5
## 5 increase convconvergence 1.00 65.9 0.135 8.93e- 1
## 6 increase convdivergence 1.00 180. 0.0623 9.50e- 1
# Nose temperature change vs. convergence status.
ggplot(dat |> drop_na(), aes(x = Nose, fill = conv)) +
  geom_bar() +
  ggtitle("Nose") +
  scale_fill_manual(values = c(divergence = "lightblue", convergence = "red", ns = "gray"))
# Multinomial model: nose temperature direction predicted by convergence status.
tidy(m <- nnet::multinom(Nose ~ conv, data = dat)) |>
  as_tibble() |>
  # plogis() maps log-odds onto (0, 1); note that for non-intercept terms this
  # is the inverse logit of a log odds ratio, not a raw probability
  mutate(estimate = plogis(estimate))
## # weights: 12 (6 variable)
## initial value 355.950382
## iter 10 value 274.873198
## final value 274.849731
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.362 0.203 -2.79 5.23e- 3
## 2 decrease convconvergence 0.000292 54.9 -0.148 8.82e- 1
## 3 decrease convdivergence 0.999 29.2 0.250 8.02e- 1
## 4 increase (Intercept) 0.756 0.140 8.06 7.35e-16
## 5 increase convconvergence 0.446 0.848 -0.255 7.99e- 1
## 6 increase convdivergence 0.999 29.2 0.230 8.18e- 1
Also for synchrony, there doesn’t seem to be a relationship between temperature change and tendency to (dys)synchronize.
# Same pairing for the synchrony results: temperature effects vs. each
# speaker's (dys)synchrony status; all=TRUE keeps unmatched speaker/section pairs.
dat <- merge(t,
             all |>
               filter(type == "synchrony") |>
               select(speaker, direction, entrain, condition, feature, section),
             by = c("speaker", "section"), all = TRUE) |>
  mutate(syn = case_when(
    entrain == "no" ~ "ns",
    entrain == "yes" ~ direction   # "synchrony" or "dyssynchrony"
  )) |>
  mutate(across(everything(), as.factor))  # mutate_all() is superseded
# Use "ns" (not significant) as the reference level for the multinomial models.
dat$Forehead <- relevel(dat$Forehead, ref="ns")
dat$Cheeks <- relevel(dat$Cheeks, ref="ns")
dat$Eyes <- relevel(dat$Eyes, ref="ns")
dat$Nose <- relevel(dat$Nose, ref="ns")
dat$syn <- relevel(dat$syn, ref="ns")
# Forehead temperature change vs. synchrony status.
ggplot(dat |> drop_na(), aes(x = Forehead, fill = syn)) +
  geom_bar() +
  ggtitle("Forehead") +
  scale_fill_manual(values = c(dyssynchrony = "lightblue", synchrony = "red", ns = "gray"))
# Multinomial model: forehead temperature direction predicted by synchrony status.
tidy(m <- nnet::multinom(Forehead ~ syn, data = dat)) |>
  as_tibble() |>
  # plogis() maps log-odds onto (0, 1); note that for non-intercept terms this
  # is the inverse logit of a log odds ratio, not a raw probability
  mutate(estimate = plogis(estimate))
## # weights: 12 (6 variable)
## initial value 355.950382
## iter 10 value 247.683020
## iter 20 value 247.528105
## iter 30 value 247.517939
## iter 30 value 247.517939
## iter 30 value 247.517939
## final value 247.517939
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.0598 0.390 -7.07 1.59e-12
## 2 decrease syndyssynchrony 0.611 1.12 0.403 6.87e- 1
## 3 decrease synsynchrony 1.00 49.9 0.212 8.32e- 1
## 4 increase (Intercept) 0.621 0.121 4.07 4.71e- 5
## 5 increase syndyssynchrony 0.461 0.431 -0.362 7.17e- 1
## 6 increase synsynchrony 0.999 49.9 0.147 8.83e- 1
# Cheek temperature change vs. synchrony status.
ggplot(dat |> drop_na(), aes(x = Cheeks, fill = syn)) +
  geom_bar() +
  ggtitle("Cheeks") +
  scale_fill_manual(values = c(dyssynchrony = "lightblue", synchrony = "red", ns = "gray"))
# Multinomial model: cheek temperature direction predicted by synchrony status.
tidy(m <- nnet::multinom(Cheeks ~ syn, data = dat)) |>
  as_tibble() |>
  # plogis() maps log-odds onto (0, 1); note that for non-intercept terms this
  # is the inverse logit of a log odds ratio, not a raw probability
  mutate(estimate = plogis(estimate))
## # weights: 12 (6 variable)
## initial value 355.950382
## iter 10 value 324.174085
## iter 20 value 324.105678
## iter 30 value 324.102335
## final value 324.102283
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.256 0.156 -6.83 8.39e-12
## 2 decrease syndyssynchrony 0.610 0.494 0.908 3.64e- 1
## 3 decrease synsynchrony 0.744 1.42 0.751 4.53e- 1
## 4 increase (Intercept) 0.339 0.136 -4.92 8.58e- 7
## 5 increase syndyssynchrony 0.429 0.543 -0.528 5.97e- 1
## 6 increase synsynchrony 0.00129 38.8 -0.171 8.64e- 1
# Eye-region temperature change vs. synchrony status.
ggplot(dat |> drop_na(), aes(x = Eyes, fill = syn)) +
  geom_bar() +
  ggtitle("Eyes") +
  scale_fill_manual(values = c(dyssynchrony = "lightblue", synchrony = "red", ns = "gray"))
# Multinomial model: eye-region temperature direction predicted by synchrony status.
tidy(m <- nnet::multinom(Eyes ~ syn, data = dat)) |>
  as_tibble() |>
  # plogis() maps log-odds onto (0, 1); note that for non-intercept terms this
  # is the inverse logit of a log odds ratio, not a raw probability
  mutate(estimate = plogis(estimate))
## # weights: 12 (6 variable)
## initial value 299.921155
## iter 10 value 208.026588
## iter 20 value 207.905687
## iter 30 value 207.897841
## final value 207.897838
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.0978 0.351 -6.33 2.44e-10
## 2 decrease syndyssynchrony 0.000513 47.4 -0.160 8.73e- 1
## 3 decrease synsynchrony 0.000358 114. -0.0699 9.44e- 1
## 4 increase (Intercept) 0.654 0.136 4.70 2.64e- 6
## 5 increase syndyssynchrony 0.481 0.464 -0.168 8.67e- 1
## 6 increase synsynchrony 0.0000867 55.2 -0.169 8.66e- 1
# Nose temperature change vs. synchrony status.
ggplot(dat |> drop_na(), aes(x = Nose, fill = syn)) +
  geom_bar() +
  ggtitle("Nose") +
  scale_fill_manual(values = c(dyssynchrony = "lightblue", synchrony = "red", ns = "gray"))
# Multinomial model: nose temperature direction predicted by synchrony status.
tidy(m <- nnet::multinom(Nose ~ syn, data = dat)) |>
  as_tibble() |>
  # plogis() maps log-odds onto (0, 1); note that for non-intercept terms this
  # is the inverse logit of a log odds ratio, not a raw probability
  mutate(estimate = plogis(estimate))
## # weights: 12 (6 variable)
## initial value 355.950382
## iter 10 value 272.621431
## final value 272.573611
## converged
## # A tibble: 6 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 decrease (Intercept) 0.347 0.212 -2.98 2.86e- 3
## 2 decrease syndyssynchrony 0.759 0.761 1.51 1.32e- 1
## 3 decrease synsynchrony 0.00230 20.2 -0.300 7.64e- 1
## 4 increase (Intercept) 0.757 0.144 7.89 2.96e-15
## 5 increase syndyssynchrony 0.646 0.643 0.938 3.48e- 1
## 6 increase synsynchrony 0.000145 33.3 -0.265 7.91e- 1
# Pair the convergence results with the speaker metadata (`meta`);
# all=TRUE keeps speakers present in only one of the two tables.
d <- merge(all |> filter(type == "convergence") |> select(-condition),
           meta,
           by = "speaker", all = TRUE) |>
  # mutate(entrain = ifelse(is.na(entrain), "no", entrain),
  #        direction = ifelse(is.na(direction), "no", direction),
  #        type = ifelse(is.na(type), "convergence", type)) |>
  mutate(effect = case_when(
    entrain == "no" ~ "ns",
    entrain == "yes" ~ direction   # "convergence" or "divergence"
  )) |>
  mutate(across(c(entrain, direction, type, effect), as.factor)) |>  # mutate_at() is superseded
  mutate(coefficient = as.numeric(coefficient))
# "ns" (not significant) is the reference level for the models below.
d$effect <- relevel(d$effect, ref = "ns")
No effect of condition on the convergence coefficient.
summary(lmer(coefficient ~ condition + (1|speaker), d))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: coefficient ~ condition + (1 | speaker)
## Data: d
##
## REML criterion at convergence: -263.2
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.87861 -0.63383 0.05659 0.57917 3.04691
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.005086 0.07132
## Residual 0.022974 0.15157
## Number of obs: 336, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 0.02081 0.01968 34.18966 1.057 0.298
## conditionimpersonal -0.02845 0.02849 33.74052 -0.998 0.325
##
## Correlation of Fixed Effects:
## (Intr)
## cndtnmprsnl -0.691
No effect of speakers’ perception of their partner. (It was the same
null result if I used d |> filter(entrain=="yes"), and
also if we look at each section separately.)
summary(lmer(coefficient ~ closeness + (1|speaker), d))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: coefficient ~ closeness + (1 | speaker)
## Data: d
##
## REML criterion at convergence: -260.3
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.8805 -0.6228 0.0539 0.5706 3.0579
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.005175 0.07193
## Residual 0.022966 0.15155
## Number of obs: 336, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -0.016168 0.031799 33.637379 -0.508 0.614
## closeness 0.006355 0.007712 33.637379 0.824 0.416
##
## Correlation of Fixed Effects:
## (Intr)
## closeness -0.893
summary(lmer(coefficient ~ similarity + (1|speaker), d))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: coefficient ~ similarity + (1 | speaker)
## Data: d
##
## REML criterion at convergence: -261.6
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.79069 -0.63298 0.04363 0.56102 3.13755
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.004847 0.06962
## Residual 0.022995 0.15164
## Number of obs: 336, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 0.068524 0.046637 32.981787 1.469 0.151
## similarity -0.010891 0.007903 33.009544 -1.378 0.177
##
## Correlation of Fixed Effects:
## (Intr)
## similarity -0.954
summary(lmer(coefficient ~ likeability + (1|speaker), d))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: coefficient ~ likeability + (1 | speaker)
## Data: d
##
## REML criterion at convergence: -260.2
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.84476 -0.62292 0.04601 0.57262 3.07541
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.005307 0.07285
## Residual 0.022971 0.15156
## Number of obs: 336, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 0.0106562 0.0704852 33.7361786 0.151 0.881
## likeability -0.0004988 0.0100570 33.8773281 -0.050 0.961
##
## Correlation of Fixed Effects:
## (Intr)
## likeability -0.979
summary(lmer(coefficient ~ becomeFriends + (1|speaker), d))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: coefficient ~ becomeFriends + (1 | speaker)
## Data: d
##
## REML criterion at convergence: -260.3
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.80597 -0.62520 0.03825 0.56254 3.07642
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.005161 0.07184
## Residual 0.022979 0.15159
## Number of obs: 336, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 0.044295 0.051058 33.404863 0.868 0.392
## becomeFriends -0.006244 0.008258 33.614280 -0.756 0.455
##
## Correlation of Fixed Effects:
## (Intr)
## becomeFrnds -0.960
# Per-section versions (Lists / Diapix / entire experiment) of the
# partner-perception models above; kept commented out because they gave
# the same null results as the pooled models.
# summary(lmer(coefficient ~ closeness + (1|speaker), d |> filter(section=="Lists")))
# summary(lmer(coefficient ~ similarity + (1|speaker), d |> filter(section=="Lists")))
# summary(lmer(coefficient ~ likeability + (1|speaker), d |> filter(section=="Lists")))
# summary(lmer(coefficient ~ becomeFriends + (1|speaker), d |> filter(section=="Lists")))
#
# summary(lmer(coefficient ~ closeness + (1|speaker), d |> filter(section=="Diapix")))
# summary(lmer(coefficient ~ similarity + (1|speaker), d |> filter(section=="Diapix")))
# summary(lmer(coefficient ~ likeability + (1|speaker), d |> filter(section=="Diapix")))
# summary(lmer(coefficient ~ becomeFriends + (1|speaker), d |> filter(section=="Diapix")))
#
# summary(lmer(coefficient ~ closeness + (1|speaker), d |> filter(section=="entireExp")))
# summary(lmer(coefficient ~ similarity + (1|speaker), d |> filter(section=="entireExp")))
# summary(lmer(coefficient ~ likeability + (1|speaker), d |> filter(section=="entireExp")))
# summary(lmer(coefficient ~ becomeFriends + (1|speaker), d |> filter(section=="entireExp")))
No effect of speakers’ perception of BFI scores.
summary(lmer(coefficient ~ extraversion + (1|speaker), d))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: coefficient ~ extraversion + (1 | speaker)
## Data: d
##
## REML criterion at convergence: -262
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.8338 -0.6216 0.0562 0.5791 3.0690
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.005281 0.07267
## Residual 0.022970 0.15156
## Number of obs: 336, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -0.023629 0.084964 33.424880 -0.278 0.783
## extraversion 0.008929 0.024225 33.438679 0.369 0.715
##
## Correlation of Fixed Effects:
## (Intr)
## extraversin -0.986
summary(lmer(coefficient ~ openness + (1|speaker), d))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: coefficient ~ openness + (1 | speaker)
## Data: d
##
## REML criterion at convergence: -261.5
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.83197 -0.61228 0.03945 0.56462 3.07429
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.005254 0.07249
## Residual 0.022970 0.15156
## Number of obs: 336, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 0.039064 0.064183 33.833267 0.609 0.547
## openness -0.009011 0.017706 33.679235 -0.509 0.614
##
## Correlation of Fixed Effects:
## (Intr)
## openness -0.975
summary(lmer(coefficient ~ agreeableness + (1|speaker), d))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: coefficient ~ agreeableness + (1 | speaker)
## Data: d
##
## REML criterion at convergence: -262.2
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.84765 -0.62133 0.05057 0.57654 3.07567
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.00531 0.07287
## Residual 0.02297 0.15156
## Number of obs: 336, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -0.0002254 0.0996640 34.1385172 -0.002 0.998
## agreeableness 0.0021606 0.0285599 34.3035338 0.076 0.940
##
## Correlation of Fixed Effects:
## (Intr)
## agreeablnss -0.989
summary(lmer(coefficient ~ conscientiousness + (1|speaker), d))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: coefficient ~ conscientiousness + (1 | speaker)
## Data: d
##
## REML criterion at convergence: -264.6
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.79128 -0.63232 0.04264 0.55342 3.10992
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.00471 0.06863
## Residual 0.02299 0.15162
## Number of obs: 336, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 0.16748 0.09903 32.98984 1.691 0.100
## conscientiousness -0.03986 0.02439 32.97684 -1.634 0.112
##
## Correlation of Fixed Effects:
## (Intr)
## conscntsnss -0.990
summary(lmer(coefficient ~ neuroticism + (1|speaker), d))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: coefficient ~ neuroticism + (1 | speaker)
## Data: d
##
## REML criterion at convergence: -262.2
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.84933 -0.61298 0.04392 0.57265 3.09676
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0.00517 0.0719
## Residual 0.02297 0.1516
## Number of obs: 336, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 0.05777 0.06400 33.57066 0.903 0.373
## neuroticism -0.01601 0.01976 33.49477 -0.810 0.424
##
## Correlation of Fixed Effects:
## (Intr)
## neuroticism -0.975
# Rebuild d for the binary/categorical analyses: same join as before but
# without the numeric conversion of `coefficient`.
d <- merge(all |> filter(type == "convergence") |> select(-condition),
           meta,
           by="speaker", all=TRUE) |>
  # mutate(entrain = ifelse(is.na(entrain), "no", entrain),
  #        direction = ifelse(is.na(direction), "no", direction),
  #        type = ifelse(is.na(type), "convergence", type)) |>
  # effect = "ns" when no entrainment, otherwise the direction of the effect.
  # Rows with NA `entrain` fall through to NA (case_when default).
  mutate(effect = case_when(
    entrain == "no" ~ "ns",
    entrain == "yes" ~ direction
  )) |>
  # across() replaces the superseded mutate_at()
  mutate(across(all_of(c("entrain", "direction", "type", "effect")), as.factor))
# "ns" as reference level, so model contrasts compare effects against it.
d$effect <- relevel(d$effect, ref="ns")
Condition: no effect
summary(glmer(entrain ~ condition + (1|speaker), d, family=binomial))
## boundary (singular) fit: see help('isSingular')
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: entrain ~ condition + (1 | speaker)
## Data: d
##
## AIC BIC logLik deviance df.resid
## 101.8 113.2 -47.9 95.8 333
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -0.2125 -0.2125 -0.1534 -0.1534 6.5192
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 3.84e-16 1.96e-08
## Number of obs: 336, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -3.7495 0.5058 -7.412 1.24e-13 ***
## conditionimpersonal 0.6520 0.6365 1.024 0.306
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## cndtnmprsnl -0.795
## optimizer (Nelder_Mead) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
Scores on questionnaire about perception of partner: no effect. Also no effect when we take each section separately.
summary(glmer(entrain ~ closeness + (1|speaker), d, family=binomial))
## boundary (singular) fit: see help('isSingular')
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: entrain ~ closeness + (1 | speaker)
## Data: d
##
## AIC BIC logLik deviance df.resid
## 100.5 111.9 -47.2 94.5 333
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -0.2978 -0.2058 -0.1608 -0.1422 7.0335
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0 0
## Number of obs: 336, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -4.3943 0.7836 -5.608 2.04e-08 ***
## closeness 0.2465 0.1598 1.543 0.123
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## closeness -0.920
## optimizer (Nelder_Mead) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
summary(glmer(entrain ~ similarity + (1|speaker), d, family=binomial))
## boundary (singular) fit: see help('isSingular')
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: entrain ~ similarity + (1 | speaker)
## Data: d
##
## AIC BIC logLik deviance df.resid
## 102.6 114.0 -48.3 96.6 333
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -0.2142 -0.1950 -0.1860 -0.1693 6.1903
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0 0
## Number of obs: 336, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -3.92830 1.07776 -3.645 0.000268 ***
## similarity 0.09411 0.17513 0.537 0.591001
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## similarity -0.959
## optimizer (Nelder_Mead) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
summary(glmer(entrain ~ likeability + (1|speaker), d, family=binomial))
## boundary (singular) fit: see help('isSingular')
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: entrain ~ likeability + (1 | speaker)
## Data: d
##
## AIC BIC logLik deviance df.resid
## 100.7 112.2 -47.4 94.7 333
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -0.2510 -0.2121 -0.1793 -0.1281 9.2391
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 6.306e-14 2.511e-07
## Number of obs: 336, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -5.7927 1.8122 -3.196 0.00139 **
## likeability 0.3365 0.2402 1.401 0.16123
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## likeability -0.985
## optimizer (Nelder_Mead) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
summary(glmer(entrain ~ becomeFriends + (1|speaker), d, family=binomial))
## boundary (singular) fit: see help('isSingular')
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: entrain ~ becomeFriends + (1 | speaker)
## Data: d
##
## AIC BIC logLik deviance df.resid
## 102.8 114.3 -48.4 96.8 333
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -0.1920 -0.1867 -0.1841 -0.1815 5.6654
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0 0
## Number of obs: 336, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -3.553 1.117 -3.182 0.00146 **
## becomeFriends 0.028 0.179 0.156 0.87574
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## becomeFrnds -0.962
## optimizer (Nelder_Mead) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# Per-section versions (Lists / Diapix) of the partner-perception logistic
# models above; kept commented out because they gave the same null results.
# summary(glmer(entrain ~ closeness + (1|speaker), d |> filter(section=="Lists"), family=binomial))
# summary(glmer(entrain ~ similarity + (1|speaker), d |> filter(section=="Lists"), family=binomial))
# summary(glmer(entrain ~ likeability + (1|speaker), d |> filter(section=="Lists"), family=binomial))
# summary(glmer(entrain ~ becomeFriends + (1|speaker), d |> filter(section=="Lists"), family=binomial))
#
# summary(glmer(entrain ~ closeness + (1|speaker), d |> filter(section=="Diapix"), family=binomial))
# summary(glmer(entrain ~ similarity + (1|speaker), d |> filter(section=="Diapix"), family=binomial))
# summary(glmer(entrain ~ likeability + (1|speaker), d |> filter(section=="Diapix"), family=binomial))
# summary(glmer(entrain ~ becomeFriends + (1|speaker), d |> filter(section=="Diapix"), family=binomial))
BFI scores: no effect; note, however, that the openness, agreeableness, and neuroticism models are not converging cleanly.
summary(glmer(entrain ~ extraversion + (1|speaker), d, family=binomial))
## boundary (singular) fit: see help('isSingular')
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: entrain ~ extraversion + (1 | speaker)
## Data: d
##
## AIC BIC logLik deviance df.resid
## 102.6 114.0 -48.3 96.6 333
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -0.2082 -0.1945 -0.1849 -0.1727 6.3012
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0 0
## Number of obs: 336, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -2.4605 1.7953 -1.371 0.171
## extraversion -0.2713 0.5254 -0.516 0.606
##
## Correlation of Fixed Effects:
## (Intr)
## extraversin -0.985
## optimizer (Nelder_Mead) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
summary(glmer(entrain ~ openness + (1|speaker), d, family=binomial))
## boundary (singular) fit: see help('isSingular')
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: entrain ~ openness + (1 | speaker)
## Data: d
##
## AIC BIC logLik deviance df.resid
## 101.5 112.9 -47.7 95.5 333
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -0.2487 -0.2137 -0.1767 -0.1518 7.9640
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 4.296e-17 6.554e-09
## Number of obs: 336, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -5.0612 1.5459 -3.274 0.00106 **
## openness 0.4557 0.3978 1.146 0.25196
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## openness -0.980
## optimizer (Nelder_Mead) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
summary(glmer(entrain ~ agreeableness + (1|speaker), d, family=binomial))
## boundary (singular) fit: see help('isSingular')
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: entrain ~ agreeableness + (1 | speaker)
## Data: d
##
## AIC BIC logLik deviance df.resid
## 101.7 113.1 -47.8 95.7 333
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -0.2416 -0.1987 -0.1716 -0.1556 6.7493
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0 0
## Number of obs: 336, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -1.0811 2.1290 -0.508 0.612
## agreeableness -0.6844 0.6401 -1.069 0.285
##
## Correlation of Fixed Effects:
## (Intr)
## agreeablnss -0.990
## optimizer (Nelder_Mead) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
summary(glmer(entrain ~ conscientiousness + (1|speaker), d, family=binomial))
## boundary (singular) fit: see help('isSingular')
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: entrain ~ conscientiousness + (1 | speaker)
## Data: d
##
## AIC BIC logLik deviance df.resid
## 102.0 113.5 -48.0 96.0 333
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -0.2275 -0.1975 -0.1793 -0.1639 7.7404
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0 0
## Number of obs: 336, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -5.5815 2.5399 -2.198 0.028 *
## conscientiousness 0.5359 0.6050 0.886 0.376
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## conscntsnss -0.993
## optimizer (Nelder_Mead) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
summary(glmer(entrain ~ neuroticism + (1|speaker), d, family=binomial))
## boundary (singular) fit: see help('isSingular')
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: entrain ~ neuroticism + (1 | speaker)
## Data: d
##
## AIC BIC logLik deviance df.resid
## 102.4 113.8 -48.2 96.4 333
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -0.2257 -0.1949 -0.1812 -0.1653 6.6299
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 0 0
## Number of obs: 336, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -4.3327 1.4431 -3.002 0.00268 **
## neuroticism 0.2931 0.4271 0.686 0.49256
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## neuroticism -0.977
## optimizer (Nelder_Mead) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
Condition: no effect
# Multinomial model: effect category (ns / convergence / divergence) ~ condition.
tidy(m <- nnet::multinom(effect ~ condition, data=d)) |>
as_tibble() |>
mutate(estimate = plogis(estimate)) # log-odds (relative to the "ns" reference) mapped to the 0-1 scale
## # weights: 9 (4 variable)
## initial value 369.133729
## iter 10 value 66.821060
## iter 20 value 54.974000
## final value 54.912750
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 convergence (Intercept) 0.0173 0.582 -6.93 4.16e-12
## 2 convergence conditionimpersonal 0.594 0.772 0.492 6.22e- 1
## 3 divergence (Intercept) 0.00585 1.00 -5.12 3.04e- 7
## 4 divergence conditionimpersonal 0.767 1.16 1.03 3.05e- 1
No effect of perception of partner
# Multinomial model: effect category ~ perceived closeness.
tidy(m <- nnet::multinom(effect ~ closeness, data=d)) |>
as_tibble() |>
mutate(estimate = plogis(estimate)) # log-odds (relative to "ns") mapped to the 0-1 scale
## # weights: 9 (4 variable)
## initial value 369.133729
## iter 10 value 83.751097
## iter 20 value 54.223578
## final value 54.200834
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 convergence (Intercept) 0.00525 1.04 -5.03 0.000000496
## 2 convergence closeness 0.582 0.201 1.65 0.0996
## 3 divergence (Intercept) 0.00835 1.17 -4.08 0.0000443
## 4 divergence closeness 0.525 0.264 0.375 0.707
# Multinomial model: effect category ~ perceived similarity.
tidy(m <- nnet::multinom(effect ~ similarity, data=d)) |>
as_tibble() |>
mutate(estimate = plogis(estimate)) # log-odds (relative to "ns") mapped to the 0-1 scale
## # weights: 9 (4 variable)
## initial value 369.133729
## iter 10 value 94.167561
## iter 20 value 57.155099
## final value 55.322567
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 convergence (Intercept) 0.00776 1.42 -3.41 0.000653
## 2 convergence similarity 0.543 0.224 0.770 0.441
## 3 divergence (Intercept) 0.0148 1.63 -2.57 0.0102
## 4 divergence similarity 0.491 0.282 -0.129 0.897
# Multinomial model: effect category ~ perceived likeability.
tidy(m <- nnet::multinom(effect ~ likeability, data=d)) |>
as_tibble() |>
mutate(estimate = plogis(estimate)) # log-odds (relative to "ns") mapped to the 0-1 scale
## # weights: 9 (4 variable)
## initial value 369.133729
## iter 10 value 56.277056
## iter 20 value 54.482283
## final value 54.481685
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 convergence (Intercept) 0.00111 2.38 -2.86 0.00420
## 2 convergence likeability 0.601 0.310 1.32 0.186
## 3 divergence (Intercept) 0.00265 2.76 -2.15 0.0316
## 4 divergence likeability 0.554 0.374 0.581 0.561
# Multinomial model: effect category ~ likelihood of becoming friends.
tidy(m <- nnet::multinom(effect ~ becomeFriends, data=d)) |>
as_tibble() |>
mutate(estimate = plogis(estimate)) # log-odds (relative to "ns") mapped to the 0-1 scale
## # weights: 9 (4 variable)
## initial value 369.133729
## iter 10 value 77.488071
## iter 20 value 55.665221
## final value 55.627293
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 convergence (Intercept) 0.0179 1.39 -2.88 0.00401
## 2 convergence becomeFriends 0.507 0.223 0.125 0.900
## 3 divergence (Intercept) 0.0103 1.83 -2.49 0.0128
## 4 divergence becomeFriends 0.507 0.294 0.0952 0.924
BFI scores: no effects
# Multinomial model: effect category ~ BFI extraversion.
tidy(m <- nnet::multinom(effect ~ extraversion, data=d)) |>
as_tibble() |>
mutate(estimate = plogis(estimate)) # log-odds (relative to "ns") mapped to the 0-1 scale
## # weights: 9 (4 variable)
## initial value 369.133729
## iter 10 value 56.871466
## iter 20 value 55.454628
## final value 55.453741
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 convergence (Intercept) 0.0765 2.24 -1.11 0.267
## 2 convergence extraversion 0.402 0.665 -0.598 0.550
## 3 divergence (Intercept) 0.0149 2.95 -1.42 0.155
## 4 divergence extraversion 0.485 0.845 -0.0698 0.944
# Multinomial model: effect category ~ BFI openness.
tidy(m <- nnet::multinom(effect ~ openness, data=d)) |>
as_tibble() |>
mutate(estimate = plogis(estimate)) # log-odds (relative to "ns") mapped to the 0-1 scale
## # weights: 9 (4 variable)
## initial value 369.133729
## iter 10 value 61.037843
## iter 20 value 54.279508
## final value 54.278538
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 convergence (Intercept) 0.000931 2.19 -3.19 0.00143
## 2 convergence openness 0.696 0.537 1.54 0.122
## 3 divergence (Intercept) 0.0176 2.19 -1.84 0.0661
## 4 divergence openness 0.473 0.616 -0.174 0.862
# Multinomial model: effect category ~ BFI agreeableness.
tidy(m <- nnet::multinom(effect ~ agreeableness, data=d)) |>
as_tibble() |>
mutate(estimate = plogis(estimate)) # log-odds (relative to "ns") mapped to the 0-1 scale
## # weights: 9 (4 variable)
## initial value 369.133729
## iter 10 value 56.833729
## iter 20 value 54.602195
## final value 54.593083
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 convergence (Intercept) 0.0493 2.64 -1.12 0.262
## 2 convergence agreeableness 0.436 0.770 -0.334 0.738
## 3 divergence (Intercept) 0.662 3.71 0.181 0.856
## 4 divergence agreeableness 0.175 1.18 -1.31 0.189
# Multinomial model: effect category ~ BFI conscientiousness.
tidy(m <- nnet::multinom(effect ~ conscientiousness, data=d)) |>
as_tibble() |>
mutate(estimate = plogis(estimate)) # log-odds (relative to "ns") mapped to the 0-1 scale
## # weights: 9 (4 variable)
## initial value 369.133729
## iter 10 value 67.863695
## iter 20 value 54.988666
## final value 54.983220
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 convergence (Intercept) 0.00758 2.92 -1.67 0.0947
## 2 convergence conscientiousness 0.564 0.707 0.362 0.717
## 3 divergence (Intercept) 0.000107 4.94 -1.85 0.0643
## 4 divergence conscientiousness 0.757 1.14 0.995 0.320
# Multinomial model: effect category ~ BFI neuroticism.
tidy(m <- nnet::multinom(effect ~ neuroticism, data=d)) |>
as_tibble() |>
mutate(estimate = plogis(estimate)) # log-odds (relative to "ns") mapped to the 0-1 scale
## # weights: 9 (4 variable)
## initial value 369.133729
## iter 10 value 59.259551
## iter 20 value 55.039171
## final value 55.039107
## converged
## # A tibble: 4 × 6
## y.level term estimate std.error statistic p.value
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 convergence (Intercept) 0.00326 1.91 -2.99 0.00281
## 2 convergence neuroticism 0.639 0.545 1.05 0.296
## 3 divergence (Intercept) 0.0206 2.18 -1.77 0.0763
## 4 divergence neuroticism 0.457 0.692 -0.249 0.804
# Rebuild d for the synchrony analyses: same join as the convergence
# version but filtered to type == "synchrony".
d <- merge(all |> filter(type == "synchrony") |> select(-condition),
           meta,
           by="speaker", all=TRUE) |>
  # mutate(entrain = ifelse(is.na(entrain), "no", entrain),
  #        direction = ifelse(is.na(direction), "no", direction),
  #        type = ifelse(is.na(type), "synchrony", type)) |>
  # effect = "ns" when no entrainment, otherwise the direction of the effect.
  # Rows with NA `entrain` fall through to NA (case_when default).
  mutate(effect = case_when(
    entrain == "no" ~ "ns",
    entrain == "yes" ~ direction
  )) |>
  # across() replaces the superseded mutate_at()
  mutate(across(all_of(c("entrain", "direction", "type", "effect")), as.factor))
No effect of condition on dyssynchrony.
summary(glmer(entrain ~ condition + (1|speaker), d, family=binomial))
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: entrain ~ condition + (1 | speaker)
## Data: d
##
## AIC BIC logLik deviance df.resid
## 185.7 197.2 -89.9 179.7 333
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -0.6841 -0.2852 -0.1584 -0.1583 3.5069
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 2.166 1.472
## Number of obs: 336, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -3.209422 0.614452 -5.223 1.76e-07 ***
## conditionimpersonal 0.001684 0.701146 0.002 0.998
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## cndtnmprsnl -0.577
No effect of perception of partner.
summary(glmer(entrain ~ closeness + (1|speaker), d, family=binomial))
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: entrain ~ closeness + (1 | speaker)
## Data: d
##
## AIC BIC logLik deviance df.resid
## 185.2 196.7 -89.6 179.2 333
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -0.7049 -0.2788 -0.1609 -0.1449 3.5874
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 2.187 1.479
## Number of obs: 336, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -3.7318 0.9526 -3.918 8.94e-05 ***
## closeness 0.1364 0.1949 0.700 0.484
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## closeness -0.850
summary(glmer(entrain ~ similarity + (1|speaker), d, family=binomial))
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: entrain ~ similarity + (1 | speaker)
## Data: d
##
## AIC BIC logLik deviance df.resid
## 184.2 195.6 -89.1 178.2 333
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -0.7030 -0.2614 -0.1666 -0.1403 4.0585
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 2.082 1.443
## Number of obs: 336, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -1.8582 1.1204 -1.659 0.0972 .
## similarity -0.2441 0.2012 -1.213 0.2250
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## similarity -0.901
summary(glmer(entrain ~ likeability + (1|speaker), d, family=binomial))
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: entrain ~ likeability + (1 | speaker)
## Data: d
##
## AIC BIC logLik deviance df.resid
## 185.4 196.9 -89.7 179.4 333
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -0.6973 -0.2759 -0.1646 -0.1510 3.7280
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 2.136 1.461
## Number of obs: 336, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -2.3480 1.6818 -1.396 0.163
## likeability -0.1255 0.2408 -0.521 0.602
##
## Correlation of Fixed Effects:
## (Intr)
## likeability -0.955
summary(glmer(entrain ~ becomeFriends + (1|speaker), d, family=binomial))
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: entrain ~ becomeFriends + (1 | speaker)
## Data: d
##
## AIC BIC logLik deviance df.resid
## 185.4 196.8 -89.7 179.4 333
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -0.7037 -0.2773 -0.1643 -0.1522 3.7917
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 2.139 1.462
## Number of obs: 336, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -2.5479 1.2067 -2.111 0.0347 *
## becomeFriends -0.1116 0.1939 -0.576 0.5648
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## becomeFrnds -0.911
No effect of BFI scores.
summary(glmer(entrain ~ extraversion + (1|speaker), d, family=binomial))
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: entrain ~ extraversion + (1 | speaker)
## Data: d
##
## AIC BIC logLik deviance df.resid
## 185.7 197.1 -89.8 179.7 333
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -0.6904 -0.2847 -0.1599 -0.1557 3.5419
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 2.17 1.473
## Number of obs: 336, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -3.5648 2.1056 -1.693 0.0904 .
## extraversion 0.1025 0.5838 0.176 0.8607
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## extraversin -0.971
summary(glmer(entrain ~ openness + (1|speaker), d, family=binomial))
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: entrain ~ openness + (1 | speaker)
## Data: d
##
## AIC BIC logLik deviance df.resid
## 184.7 196.2 -89.4 178.7 333
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -0.7003 -0.2606 -0.1645 -0.1531 4.0379
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 2.002 1.415
## Number of obs: 336, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -4.6444 1.6096 -2.885 0.00391 **
## openness 0.4118 0.4167 0.988 0.32298
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## openness -0.953
summary(glmer(entrain ~ agreeableness + (1|speaker), d, family=binomial))
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: entrain ~ agreeableness + (1 | speaker)
## Data: d
##
## AIC BIC logLik deviance df.resid
## 182.9 194.4 -88.5 176.9 333
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -0.7299 -0.2601 -0.1781 -0.1242 4.1805
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 2.019 1.421
## Number of obs: 336, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 0.7887 2.4373 0.324 0.746
## agreeableness -1.1713 0.7402 -1.582 0.114
##
## Correlation of Fixed Effects:
## (Intr)
## agreeablnss -0.981
summary(glmer(entrain ~ conscientiousness + (1|speaker), d, family=binomial))
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: entrain ~ conscientiousness + (1 | speaker)
## Data: d
##
## AIC BIC logLik deviance df.resid
## 185.6 197.1 -89.8 179.6 333
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -0.6875 -0.2764 -0.1610 -0.1554 3.6182
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 2.153 1.467
## Number of obs: 336, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -2.4564 2.4059 -1.021 0.307
## conscientiousness -0.1870 0.5918 -0.316 0.752
##
## Correlation of Fixed Effects:
## (Intr)
## conscntsnss -0.978
summary(glmer(entrain ~ neuroticism + (1|speaker), d, family=binomial))
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: entrain ~ neuroticism + (1 | speaker)
## Data: d
##
## AIC BIC logLik deviance df.resid
## 185.5 197.0 -89.8 179.5 333
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -0.6889 -0.2719 -0.1610 -0.1537 3.7687
##
## Random effects:
## Groups Name Variance Std.Dev.
## speaker (Intercept) 2.124 1.458
## Number of obs: 336, groups: speaker, 38
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -2.5229 1.5947 -1.582 0.114
## neuroticism -0.2156 0.4885 -0.441 0.659
##
## Correlation of Fixed Effects:
## (Intr)
## neuroticism -0.950
(It doesn’t make sense to analyze the direction of the effect here—synchrony vs. dyssynchrony—because there were almost no synchrony effects.)
(I also wanted to link the temperature and speech datasets to analyze them in relation to each other. However, there are so few speech data points with significant entrainment that this analysis would not be meaningful.)
sessionInfo()
## R version 4.2.3 (2023-03-15 ucrt)
## Platform: x86_64-w64-mingw32/x64 (64-bit)
## Running under: Windows 10 x64 (build 22621)
##
## Matrix products: default
##
## locale:
## [1] LC_COLLATE=German_Germany.utf8 LC_CTYPE=German_Germany.utf8
## [3] LC_MONETARY=German_Germany.utf8 LC_NUMERIC=C
## [5] LC_TIME=German_Germany.utf8
##
## attached base packages:
## [1] stats graphics grDevices utils datasets methods base
##
## other attached packages:
## [1] emmeans_1.8.9 parsnip_1.1.1 broom_1.0.5 DHARMa_0.4.6
## [5] lmerTest_3.1-3 lme4_1.1-30 Matrix_1.5-3 lubridate_1.9.2
## [9] forcats_1.0.0 stringr_1.5.0 dplyr_1.1.3 purrr_1.0.2
## [13] readr_2.1.4 tidyr_1.3.0 tibble_3.2.1 ggplot2_3.4.3
## [17] tidyverse_2.0.0
##
## loaded via a namespace (and not attached):
## [1] sass_0.4.2 jsonlite_1.8.7 splines_4.2.3
## [4] here_1.0.1 bslib_0.4.0 highr_0.9
## [7] broom.mixed_0.2.9.4 yaml_2.3.5 globals_0.16.2
## [10] numDeriv_2016.8-1.1 pillar_1.9.0 backports_1.4.1
## [13] lattice_0.20-45 glue_1.6.2 digest_0.6.29
## [16] minqa_1.2.4 hardhat_1.3.0 colorspace_2.0-3
## [19] sandwich_3.0-2 htmltools_0.5.3 pkgconfig_2.0.3
## [22] listenv_0.9.0 xtable_1.8-4 mvtnorm_1.1-3
## [25] scales_1.2.1 tzdb_0.3.0 timechange_0.2.0
## [28] mgcv_1.8-40 farver_2.1.1 generics_0.1.3
## [31] ellipsis_0.3.2 furrr_0.3.1 TH.data_1.1-1
## [34] cachem_1.0.6 withr_2.5.0 nnet_7.3-19
## [37] cli_3.6.1 survival_3.5-3 magrittr_2.0.3
## [40] estimability_1.4.1 evaluate_0.23 parallelly_1.34.0
## [43] future_1.30.0 fansi_1.0.3 nlme_3.1-162
## [46] MASS_7.3-58.2 tools_4.2.3 hms_1.1.2
## [49] lifecycle_1.0.3 multcomp_1.4-23 munsell_0.5.0
## [52] compiler_4.2.3 jquerylib_0.1.4 rlang_1.1.1
## [55] grid_4.2.3 nloptr_2.0.3 rstudioapi_0.15.0
## [58] labeling_0.4.2 rmarkdown_2.16 boot_1.3-28.1
## [61] gtable_0.3.1 codetools_0.2-19 R6_2.5.1
## [64] zoo_1.8-10 knitr_1.40 fastmap_1.1.0
## [67] utf8_1.2.2 rprojroot_2.0.3 stringi_1.7.8
## [70] parallel_4.2.3 Rcpp_1.0.9 vctrs_0.6.3
## [73] tidyselect_1.2.0 xfun_0.32 coda_0.19-4